entry_point (string, 1–65) | original_triton_code (string, 4.5k–619k) | python_code (string, 208–60.9k) | triton_code (string, 1.15k–275k) | repo_name (string, 7–115) | module_name (string, 1–65) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, 1–6 entries) | stars (int64, 0–19.8k) | sha (string, 40) | repo_link (string, 72–180) | pytorch_code (string, 200–4.05k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
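Before the rows, a minimal sketch of how records with this schema could be read, assuming the table comes from a Hugging Face dataset; `<repo-id>` is a placeholder, not the actual dataset name:

```python
# Hedged sketch, not part of the dataset card itself: load the dataset and
# pull the paired PyTorch / Triton sources out of one record.
# "<repo-id>" is a placeholder -- substitute the real repository id.
from datasets import load_dataset

ds = load_dataset("<repo-id>", split="train")
row = ds[0]
print(row["entry_point"], row["repo_name"], row["licenses"])
print(row["pytorch_code"])  # eager nn.Module source
print(row["triton_code"])   # cleaned Inductor/Triton translation
```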
Swish | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rr/crrai2rdezrkq6ksmsjeyfjzlg44wsvmcpl7xzgyx4iykkgt5ju2.py
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# sigmoid => sigmoid
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp0
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sigmoid, mul_1], Original ATen: [aten.mul, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Swish(nn.Module):
def __init__(self, num_features):
super().__init__()
self.num_features = num_features
self.scale = nn.Parameter(torch.ones(num_features))
def forward(self, x):
return x * torch.sigmoid(self.scale * x)
def extra_repr(self):
return 'num_features={}'.format(self.num_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp0
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_0[grid(256)](primals_2, primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class SwishNew(nn.Module):
def __init__(self, num_features):
super().__init__()
self.num_features = num_features
self.scale = nn.Parameter(torch.ones(num_features))
def extra_repr(self):
return 'num_features={}'.format(self.num_features)
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| rgflowopen/rg-flow | Swish | false | 7,544 | ["MIT"] | 1 | f1ebb56e3e51bb26ecc2f10fe61eb34cae18398b | https://github.com/rgflowopen/rg-flow/tree/f1ebb56e3e51bb26ecc2f10fe61eb34cae18398b | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, num_features):
super().__init__()
self.num_features = num_features
self.scale = nn.Parameter(torch.ones(num_features))
def forward(self, x):
return x * torch.sigmoid(self.scale * x)
def extra_repr(self):
return 'num_features={}'.format(self.num_features)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
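The Swish row above pairs an eager module with its Inductor output: the single pointwise kernel fuses `scale * x`, the `sigmoid`, and the final multiply into one pass, broadcasting the per-feature `scale` over the last dimension via `xindex % 4`. A minimal sketch (not part of the row, and assuming a CUDA device) of checking the compiled `SwishNew` against the eager `Swish`:

```python
# Hedged equivalence check for the row above; assumes the Swish and SwishNew
# classes defined in the row are in scope and a CUDA device is available.
import torch

eager = Swish(num_features=4).cuda()
fused = SwishNew(num_features=4).cuda()
fused.load_state_dict(eager.state_dict())  # share the learned per-feature scale

x = torch.rand(4, 4, 4, 4, device="cuda")  # the shape/strides the kernel asserts
torch.testing.assert_close(fused(x), eager(x))
```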
PADB | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2h/c2hwis6dmclbthilxlfmvkgfp2vghqgv67rfglegyfpqknzqevyc.py
# Topologically Sorted Source Nodes: [conv2d, distilled_c1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# distilled_c1 => gt
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mq/cmqxijmgy5ei2rdkrf4x4qisxtyeffr4myp7paxeinvhskxfaimk.py
# Topologically Sorted Source Nodes: [r_c1, add, r_c1_1], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add => add
# r_c1 => convolution_1
# r_c1_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_3), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {})
# %where_1 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add, %mul_1), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.2
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ow/cowb7t75pe6ctxhfkbx2hve7nbo23jlj4amirc5kviyfsoxdpuqf.py
# Topologically Sorted Source Nodes: [out_cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %where_2, %where_4, %where_6], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: '*fp32', 9: '*i1', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.2
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr3 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp17 & xmask, other=0.0).to(tl.int1)
tmp19 = tl.load(in_ptr4 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp17 & xmask, other=0.0)
tmp20 = tl.load(in_ptr5 + ((-4) + x1), tmp17 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tmp21 * tmp9
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tmp0 >= tmp15
tmp27 = tl.full([1], 12, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr6 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp29 & xmask, other=0.0).to(tl.int1)
tmp31 = tl.load(in_ptr7 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp29 & xmask, other=0.0)
tmp32 = tl.load(in_ptr8 + ((-8) + x1), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tmp33 * tmp9
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp29, tmp35, tmp36)
tmp38 = tmp0 >= tmp27
tmp39 = tl.full([1], 16, tl.int64)
tmp40 = tmp0 < tmp39
tmp41 = tl.load(in_ptr9 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp38 & xmask, other=0.0).to(tl.int1)
tmp42 = tl.load(in_ptr10 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp38 & xmask, other=0.0)
tmp43 = tl.load(in_ptr11 + ((-12) + x1), tmp38 & xmask, eviction_policy='evict_last', other=0.0)
tmp44 = tmp42 + tmp43
tmp45 = tmp44 * tmp9
tmp46 = tl.where(tmp41, tmp44, tmp45)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp38, tmp46, tmp47)
tmp49 = tl.where(tmp29, tmp37, tmp48)
tmp50 = tl.where(tmp17, tmp25, tmp49)
tmp51 = tl.where(tmp4, tmp13, tmp50)
tl.store(out_ptr0 + (x3), tmp51, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lo/clothqivkwnemsgxm25a4sma7vuvwypmzugcwyh3spfaf3gzzubx.py
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_16, %primals_17, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/be/cbem4zby46e2ihf34z7hven6r7xvj5vmtw5a4auzcx3uo4jhwi7l.py
# Topologically Sorted Source Nodes: [y, y_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# out => mul_7
# y => convolution_8
# y_1 => sigmoid
# Graph fragment:
# %convolution_8 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_7, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_8,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_4 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_19, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, distilled_c1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [r_c1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r_c1, add, r_c1_1], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_1.run(buf2, primals_5, primals_3, buf3, buf4, 256, grid=grid(256), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, distilled_c2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf5, primals_7, buf6, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [r_c2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf9 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [r_c2, add_1, r_c2_1], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_1.run(buf7, primals_9, buf4, buf8, buf9, 256, grid=grid(256), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, distilled_c3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf10, primals_11, buf11, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [r_c3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf14 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [r_c3, add_2, r_c3_1], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_1.run(buf12, primals_13, buf9, buf13, buf14, 256, grid=grid(256), stream=stream0)
del buf12
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1))
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_6, r_c4], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf15, primals_15, buf16, 256, grid=grid(256), stream=stream0)
buf17 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf1, buf0, primals_2, buf6, buf5, primals_7, buf11, buf10, primals_11, buf16, buf15, primals_15, buf17, 1024, grid=grid(1024), stream=stream0)
del buf0
del buf10
del buf15
del primals_11
del primals_15
del primals_2
del primals_7
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf19, primals_17, 256, grid=grid(256), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [y], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 4, 4, 4), (64, 16, 4, 1))
buf21 = buf20; del buf20 # reuse
buf22 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [y, y_1, out], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_4.run(buf21, primals_19, buf19, buf22, 256, grid=grid(256), stream=stream0)
del primals_19
return (buf22, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf14, buf16, buf17, buf19, buf21, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
class PA(nn.Module):
def __init__(self, nf):
super(PA, self).__init__()
self.conv = nn.Conv2d(nf, nf, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
y = self.conv(x)
y = self.sigmoid(y)
out = torch.mul(x, y)
return out
class PADB(nn.Module):
def __init__(self, in_channels, out_channels, c_weight=1):
super(PADB, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.dc = int(in_channels * c_weight)
self.c_weight = c_weight
expand_dim = in_channels
if c_weight > 1:
expand_dim = self.dc
self.expandconv = nn.Conv2d(in_channels, expand_dim, 3, padding=1)
self.c1_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c1_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c2_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c2_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c3_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c3_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c4 = nn.Conv2d(expand_dim, self.dc, 3, padding=1)
self.act = nn.LeakyReLU(0.2)
self.c5 = nn.Conv2d(self.dc * 4, out_channels, 1)
self.PA = PA(out_channels)
def forward(self, input):
if self.c_weight > 1:
input = self.act(self.expandconv(input))
distilled_c1 = self.act(self.c1_d(input))
r_c1 = self.c1_r(input)
r_c1 = self.act(r_c1 + input)
distilled_c2 = self.act(self.c2_d(r_c1))
r_c2 = self.c2_r(r_c1)
r_c2 = self.act(r_c2 + r_c1)
distilled_c3 = self.act(self.c3_d(r_c2))
r_c3 = self.c3_r(r_c2)
r_c3 = self.act(r_c3 + r_c2)
r_c4 = self.act(self.c4(r_c3))
out_cat = torch.cat([distilled_c1, distilled_c2, distilled_c3, r_c4
], dim=1)
out = self.PA(self.c5(out_cat))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.2
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.2
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 8, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tmp14 & tmp16
tmp18 = tl.load(in_ptr3 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp17 &
xmask, other=0.0).to(tl.int1)
tmp19 = tl.load(in_ptr4 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp17 &
xmask, other=0.0)
tmp20 = tl.load(in_ptr5 + (-4 + x1), tmp17 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp19 + tmp20
tmp22 = tmp21 * tmp9
tmp23 = tl.where(tmp18, tmp21, tmp22)
tmp24 = tl.full(tmp23.shape, 0.0, tmp23.dtype)
tmp25 = tl.where(tmp17, tmp23, tmp24)
tmp26 = tmp0 >= tmp15
tmp27 = tl.full([1], 12, tl.int64)
tmp28 = tmp0 < tmp27
tmp29 = tmp26 & tmp28
tmp30 = tl.load(in_ptr6 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp29 &
xmask, other=0.0).to(tl.int1)
tmp31 = tl.load(in_ptr7 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp29 &
xmask, other=0.0)
tmp32 = tl.load(in_ptr8 + (-8 + x1), tmp29 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp31 + tmp32
tmp34 = tmp33 * tmp9
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.full(tmp35.shape, 0.0, tmp35.dtype)
tmp37 = tl.where(tmp29, tmp35, tmp36)
tmp38 = tmp0 >= tmp27
tl.full([1], 16, tl.int64)
tmp41 = tl.load(in_ptr9 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp38 &
xmask, other=0.0).to(tl.int1)
tmp42 = tl.load(in_ptr10 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp38 &
xmask, other=0.0)
tmp43 = tl.load(in_ptr11 + (-12 + x1), tmp38 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp44 = tmp42 + tmp43
tmp45 = tmp44 * tmp9
tmp46 = tl.where(tmp41, tmp44, tmp45)
tmp47 = tl.full(tmp46.shape, 0.0, tmp46.dtype)
tmp48 = tl.where(tmp38, tmp46, tmp47)
tmp49 = tl.where(tmp29, tmp37, tmp48)
tmp50 = tl.where(tmp17, tmp25, tmp49)
tmp51 = tl.where(tmp4, tmp13, tmp50)
tl.store(out_ptr0 + x3, tmp51, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_4(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf0,
primals_2, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf2 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_leaky_relu_1[grid(256)](buf2,
primals_5, primals_3, buf3, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 16, 4, 1))
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf5,
primals_7, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 16, 4, 1))
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf9 = buf2
del buf2
triton_poi_fused_add_convolution_leaky_relu_1[grid(256)](buf7,
primals_9, buf4, buf8, buf9, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_9
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf10,
primals_11, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf9, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 4, 4), (64, 16, 4, 1))
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf14 = buf7
del buf7
triton_poi_fused_add_convolution_leaky_relu_1[grid(256)](buf12,
primals_13, buf9, buf13, buf14, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf12
del primals_13
buf15 = extern_kernels.convolution(buf14, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 4, 4, 4), (64, 16, 4, 1))
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_0[grid(256)](buf15,
primals_15, buf16, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf17 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.
float32)
triton_poi_fused_cat_2[grid(1024)](buf1, buf0, primals_2, buf6,
buf5, primals_7, buf11, buf10, primals_11, buf16, buf15,
primals_15, buf17, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del buf10
del buf15
del primals_11
del primals_15
del primals_2
del primals_7
buf18 = extern_kernels.convolution(buf17, primals_16, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 4, 4), (64, 16, 4, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_3[grid(256)](buf19, primals_17, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf20 = extern_kernels.convolution(buf19, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 4, 4, 4), (64, 16, 4, 1))
buf21 = buf20
del buf20
buf22 = buf5
del buf5
triton_poi_fused_convolution_mul_sigmoid_4[grid(256)](buf21,
primals_19, buf19, buf22, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_19
return (buf22, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18, buf1,
buf3, buf4, buf6, buf8, buf9, buf11, buf13, buf14, buf16, buf17,
buf19, buf21)
class PA(nn.Module):
def __init__(self, nf):
super(PA, self).__init__()
self.conv = nn.Conv2d(nf, nf, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
y = self.conv(x)
y = self.sigmoid(y)
out = torch.mul(x, y)
return out
class PADBNew(nn.Module):
def __init__(self, in_channels, out_channels, c_weight=1):
super(PADBNew, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.dc = int(in_channels * c_weight)
self.c_weight = c_weight
expand_dim = in_channels
if c_weight > 1:
expand_dim = self.dc
self.expandconv = nn.Conv2d(in_channels, expand_dim, 3, padding=1)
self.c1_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c1_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c2_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c2_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c3_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c3_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c4 = nn.Conv2d(expand_dim, self.dc, 3, padding=1)
self.act = nn.LeakyReLU(0.2)
self.c5 = nn.Conv2d(self.dc * 4, out_channels, 1)
self.PA = PA(out_channels)
def forward(self, input_0):
primals_1 = self.c1_d.weight
primals_2 = self.c1_d.bias
primals_4 = self.c1_r.weight
primals_5 = self.c1_r.bias
primals_6 = self.c2_d.weight
primals_7 = self.c2_d.bias
primals_8 = self.c2_r.weight
primals_9 = self.c2_r.bias
primals_10 = self.c3_d.weight
primals_11 = self.c3_d.bias
primals_12 = self.c3_r.weight
primals_13 = self.c3_r.bias
primals_14 = self.c4.weight
primals_15 = self.c4.bias
primals_16 = self.c5.weight
primals_17 = self.c5.bias
primals_18 = self.PA.conv.weight
primals_19 = self.PA.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0]
| qwopqwop200/Fast-Invertible-Rescaling-Net | PADB | false | 7,545 | ["MIT"] | 1 | 871733f2eee7929d6b37c4d1d6a27347b39b67a9 | https://github.com/qwopqwop200/Fast-Invertible-Rescaling-Net/tree/871733f2eee7929d6b37c4d1d6a27347b39b67a9 | import torch
import torch.utils.data
import torch.nn as nn
class PA(nn.Module):
def __init__(self, nf):
super().__init__()
self.conv = nn.Conv2d(nf, nf, 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
y = self.conv(x)
y = self.sigmoid(y)
out = torch.mul(x, y)
return out
class Model(nn.Module):
def __init__(self, in_channels, out_channels, c_weight=1):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.dc = int(in_channels * c_weight)
self.c_weight = c_weight
expand_dim = in_channels
if c_weight > 1:
expand_dim = self.dc
self.expandconv = nn.Conv2d(in_channels, expand_dim, 3, padding=1)
self.c1_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c1_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c2_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c2_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c3_d = nn.Conv2d(expand_dim, self.dc, 1)
self.c3_r = nn.Conv2d(expand_dim, expand_dim, 3, padding=1)
self.c4 = nn.Conv2d(expand_dim, self.dc, 3, padding=1)
self.act = nn.LeakyReLU(0.2)
self.c5 = nn.Conv2d(self.dc * 4, out_channels, 1)
self.PA = PA(out_channels)
def forward(self, input):
if self.c_weight > 1:
input = self.act(self.expandconv(input))
distilled_c1 = self.act(self.c1_d(input))
r_c1 = self.c1_r(input)
r_c1 = self.act(r_c1 + input)
distilled_c2 = self.act(self.c2_d(r_c1))
r_c2 = self.c2_r(r_c1)
r_c2 = self.act(r_c2 + r_c1)
distilled_c3 = self.act(self.c3_d(r_c2))
r_c3 = self.c3_r(r_c2)
r_c3 = self.act(r_c3 + r_c2)
r_c4 = self.act(self.c4(r_c3))
out_cat = torch.cat([distilled_c1, distilled_c2, distilled_c3, r_c4
], dim=1)
out = self.PA(self.c5(out_cat))
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
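In the PADB row above, the generated kernels encode `LeakyReLU(0.2)` as `tl.where(x > 0, x, 0.2 * x)`, save the `x > 0` boolean buffers for the backward pass, and have `triton_poi_fused_cat_2` write the four distilled/residual branches directly into the concatenated 16-channel buffer. A minimal sketch (not part of the row, and assuming a CUDA device) of checking the compiled `PADBNew` against the eager `PADB`:

```python
# Hedged equivalence check for the PADB row above; assumes the PADB and
# PADBNew classes defined in the row are in scope and a CUDA device is
# available.
import torch

eager = PADB(in_channels=4, out_channels=4).cuda()
fused = PADBNew(in_channels=4, out_channels=4).cuda()
fused.load_state_dict(eager.state_dict())  # identical parameter layout

x = torch.rand(4, 4, 4, 4, device="cuda")
# Both paths run the convolutions through the same aten kernels, so only
# small float differences from the fused pointwise ops are expected.
torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)
```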
tLNv2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rs/crsnywfgmmsx4owavufb4dscuwupmbyjscwr76bbv6xspf54k5jh.py
# Topologically Sorted Source Nodes: [mean_1, mean_2, mean_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# mean_1 => add
# mean_2 => add_1
# mean_3 => add_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select, %select_1), kwargs = {})
# %select_scatter_default : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%primals_1, %add, 3, 0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_2, %select_4), kwargs = {})
# %select_scatter_default_1 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default, %add_1, 3, 0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_5, %select_7), kwargs = {})
# %select_scatter_default_2 : [num_users=5] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_1, %add_2, 3, 0), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp4 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.where(tmp3, tmp6, tmp4)
tmp8 = tl.full([1], 2, tl.int32)
tmp9 = tmp8 == tmp1
tmp11 = tl.where(tmp9, tmp6, tmp10)
tmp12 = tmp7 + tmp11
tmp13 = tl.where(tmp3, tmp12, tmp7)
tmp14 = tl.full([1], 3, tl.int32)
tmp15 = tmp14 == tmp1
tmp17 = tl.where(tmp15, tmp6, tmp16)
tmp18 = tl.where(tmp15, tmp12, tmp17)
tmp19 = tmp13 + tmp18
tmp21 = tl.where(tmp2, tmp6, tmp20)
tmp22 = tl.where(tmp2, tmp12, tmp21)
tmp23 = tl.where(tmp2, tmp19, tmp22)
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bj/cbjpy7gm4gjedatir5qy7auyeeo7oqzof6u47err2ci3ozaq5n6z.py
# Topologically Sorted Source Nodes: [mean_4, sub, pow_1, mean_6, mean_7, mean_8], Original ATen: [aten.div, aten.sub, aten.pow, aten.add]
# Source node to ATen node mapping:
# mean_4 => div
# mean_6 => add_3
# mean_7 => add_4
# mean_8 => add_5
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze_1, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_2, %div), kwargs = {})
# %pow_1 : [num_users=3] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_10, %select_11), kwargs = {})
# %select_scatter_default_3 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%pow_1, %add_3, 3, 0), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_12, %select_14), kwargs = {})
# %select_scatter_default_4 : [num_users=3] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_3, %add_4, 3, 0), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%select_15, %select_17), kwargs = {})
# %select_scatter_default_5 : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%select_scatter_default_4, %add_5, 3, 0), kwargs = {})
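# Eager-mode sketch of what this kernel materializes (names are illustrative):
#   mean = buf0[..., 0:1] / 4        # buf0[..., 0] holds the running sum
#   sq   = (buf0 - mean) ** 2
#   sq[..., 0] then accumulates sq[..., 1:4], via the same index-0 aliasing.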
triton_poi_fused_add_div_pow_sub_1 = async_compile.triton('triton_poi_fused_add_div_pow_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_pow_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp4 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (x2), xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp5 = 0.25
tmp6 = tmp4 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = tmp7 * tmp7
tmp10 = tmp9 - tmp6
tmp11 = tmp10 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = tl.where(tmp3, tmp12, tmp8)
tmp14 = tl.full([1], 2, tl.int32)
tmp15 = tmp14 == tmp1
tmp17 = tmp16 - tmp6
tmp18 = tmp17 * tmp17
tmp19 = tl.where(tmp15, tmp12, tmp18)
tmp20 = tmp13 + tmp19
tmp21 = tl.where(tmp3, tmp20, tmp13)
tmp22 = tl.full([1], 3, tl.int32)
tmp23 = tmp22 == tmp1
tmp25 = tmp24 - tmp6
tmp26 = tmp25 * tmp25
tmp27 = tl.where(tmp23, tmp12, tmp26)
tmp28 = tl.where(tmp23, tmp20, tmp27)
tmp29 = tmp21 + tmp28
tmp31 = tmp30 - tmp6
tmp32 = tmp31 * tmp31
tmp33 = tl.where(tmp2, tmp12, tmp32)
tmp34 = tl.where(tmp2, tmp20, tmp33)
tmp35 = tl.where(tmp2, tmp29, tmp34)
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bh/cbhh24rywaugwnpzje6fccgvksyqpmthcmbtrwr5wsew7niz2cal.py
# Topologically Sorted Source Nodes: [truediv_1, add, std], Original ATen: [aten.div, aten.add, aten.sqrt]
# Source node to ATen node mapping:
# add => add_6
# std => sqrt
# truediv_1 => div_1
# Graph fragment:
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%unsqueeze_3, 4), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_1, 1e-08), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_6,), kwargs = {})
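# Eager-mode sketch: std = (sq[..., 0:1] / 4 + 1e-08).sqrt(), reading only
# index 0 of the last dim, where the previous kernel accumulated the sum of
# squared deviations.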
triton_poi_fused_add_div_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sqrt_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = libdevice.sqrt(tmp4)
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xp/cxpikcr4kkuns4utqmqad22uw6hnlhsakqxh4hpiv244g4luz6so.py
# Topologically Sorted Source Nodes: [sub_1, x, mul, add_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.add]
# Source node to ATen node mapping:
# add_1 => add_7
# mul => mul
# sub_1 => sub_1
# x => div_2
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_scatter_default_2, %expand), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %expand_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %expand_2), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {})
# %copy_ : [num_users=0] = call_function[target=torch.ops.aten.copy_.default](args = (%primals_1, %select_scatter_default_2), kwargs = {})
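# Eager-mode sketch: out = (buf0 - mean) / std * gain + bias. Note that the
# normalization runs on the mutated buf0 (index 0 of the last dim already
# replaced by the sum), and copy_ writes that mutated tensor back into
# primals_1, matching the in-place semantics of my_mean.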
triton_poi_fused_add_div_mul_sub_3 = async_compile.triton('triton_poi_fused_add_div_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_3', 'mutated_arg_names': ['out_ptr1'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = (xindex // 4)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (4*x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp0 - tmp3
tmp6 = tmp4 / tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + (x4), tmp10, xmask)
tl.store(out_ptr1 + (x4), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_1, mean_2, mean_3], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_4, sub, pow_1, mean_6, mean_7, mean_8], Original ATen: [aten.div, aten.sub, aten.pow, aten.add]
triton_poi_fused_add_div_pow_sub_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv_1, add, std], Original ATen: [aten.div, aten.add, aten.sqrt]
triton_poi_fused_add_div_sqrt_2.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub_1, x, mul, add_1], Original ATen: [aten.sub, aten.div, aten.mul, aten.add]
triton_poi_fused_add_div_mul_sub_3.run(buf0, buf2, primals_2, primals_3, buf3, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
del primals_3
return (buf3, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.autograd import Variable
def my_mean(x):
f = x.shape[-1]
mean = x[..., 0]
for i in range(1, f):
mean += x[..., i]
return mean[..., None] / f
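# Caution: `mean = x[..., 0]` returns a view, so `mean += x[..., i]` mutates
# the input tensor in place. The compiled graph above reflects this via the
# select_scatter chain and the final copy_ back into primals_1.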
class tLNv2(nn.Module):
def __init__(self, dimension, eps=1e-08, trainable=True):
super(tLNv2, self).__init__()
self.eps = eps
if trainable:
self.gain = nn.Parameter(torch.ones(1, dimension, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, dimension, 1, 1))
else:
self.gain = Variable(torch.ones(1, dimension, 1, 1),
requires_grad=False)
self.bias = Variable(torch.zeros(1, dimension, 1, 1),
requires_grad=False)
def forward(self, inp):
inp.size(0)
mean = my_mean(inp)
std = torch.sqrt(my_mean((inp - mean) ** 2) + self.eps)
x = (inp - mean.expand_as(inp)) / std.expand_as(inp)
return x * self.gain.expand_as(x).type(x.type()) + self.bias.expand_as(
x).type(x.type())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dimension': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp4 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp6 = tmp4 + tmp5
tmp7 = tl.where(tmp3, tmp6, tmp4)
tmp8 = tl.full([1], 2, tl.int32)
tmp9 = tmp8 == tmp1
tmp11 = tl.where(tmp9, tmp6, tmp10)
tmp12 = tmp7 + tmp11
tmp13 = tl.where(tmp3, tmp12, tmp7)
tmp14 = tl.full([1], 3, tl.int32)
tmp15 = tmp14 == tmp1
tmp17 = tl.where(tmp15, tmp6, tmp16)
tmp18 = tl.where(tmp15, tmp12, tmp17)
tmp19 = tmp13 + tmp18
tmp21 = tl.where(tmp2, tmp6, tmp20)
tmp22 = tl.where(tmp2, tmp12, tmp21)
tmp23 = tl.where(tmp2, tmp19, tmp22)
tl.store(out_ptr0 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_div_pow_sub_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp4 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp24 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + x2, xmask)
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp3 = tmp1 == tmp1
tmp5 = 0.25
tmp6 = tmp4 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = tmp7 * tmp7
tmp10 = tmp9 - tmp6
tmp11 = tmp10 * tmp10
tmp12 = tmp8 + tmp11
tmp13 = tl.where(tmp3, tmp12, tmp8)
tmp14 = tl.full([1], 2, tl.int32)
tmp15 = tmp14 == tmp1
tmp17 = tmp16 - tmp6
tmp18 = tmp17 * tmp17
tmp19 = tl.where(tmp15, tmp12, tmp18)
tmp20 = tmp13 + tmp19
tmp21 = tl.where(tmp3, tmp20, tmp13)
tmp22 = tl.full([1], 3, tl.int32)
tmp23 = tmp22 == tmp1
tmp25 = tmp24 - tmp6
tmp26 = tmp25 * tmp25
tmp27 = tl.where(tmp23, tmp12, tmp26)
tmp28 = tl.where(tmp23, tmp20, tmp27)
tmp29 = tmp21 + tmp28
tmp31 = tmp30 - tmp6
tmp32 = tmp31 * tmp31
tmp33 = tl.where(tmp2, tmp12, tmp32)
tmp34 = tl.where(tmp2, tmp20, tmp33)
tmp35 = tl.where(tmp2, tmp29, tmp34)
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused_add_div_sqrt_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 1e-08
tmp4 = tmp2 + tmp3
tmp5 = libdevice.sqrt(tmp4)
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = xindex // 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp2 = 0.25
tmp3 = tmp1 * tmp2
tmp4 = tmp0 - tmp3
tmp6 = tmp4 / tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 + tmp9
tl.store(out_ptr0 + x4, tmp10, xmask)
tl.store(out_ptr1 + x4, tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](primals_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_pow_sub_1[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_add_div_sqrt_2[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
triton_poi_fused_add_div_mul_sub_3[grid(256)](buf0, buf2, primals_2,
primals_3, buf3, primals_1, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
del primals_2
del primals_3
return buf3, buf0, buf2
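# buf3 is the normalized output; buf0 (the mutated input, also copied back
# into primals_1 by the last kernel) and buf2 (per-position std) are returned
# as well, presumably as saved tensors for the backward pass.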
def my_mean(x):
f = x.shape[-1]
mean = x[..., 0]
for i in range(1, f):
mean += x[..., i]
return mean[..., None] / f
class tLNv2New(nn.Module):
def __init__(self, dimension, eps=1e-08, trainable=True):
super(tLNv2New, self).__init__()
self.eps = eps
if trainable:
self.gain = nn.Parameter(torch.ones(1, dimension, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, dimension, 1, 1))
else:
self.gain = Variable(torch.ones(1, dimension, 1, 1),
requires_grad=False)
self.bias = Variable(torch.zeros(1, dimension, 1, 1),
requires_grad=False)
def forward(self, input_0):
primals_2 = self.gain
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| rbodo/pytorch-OpCounter | tLNv2 | false | 7,546 | [
"MIT"
] | 1 | 1857cbb5f9e53343fb349af84efdfde2554a2691 | https://github.com/rbodo/pytorch-OpCounter/tree/1857cbb5f9e53343fb349af84efdfde2554a2691 | import torch
import torch.nn as nn
from torch.autograd import Variable
def my_mean(x):
f = x.shape[-1]
mean = x[..., 0]
for i in range(1, f):
mean += x[..., i]
return mean[..., None] / f
class Model(nn.Module):
def __init__(self, dimension, eps=1e-08, trainable=True):
super().__init__()
self.eps = eps
if trainable:
self.gain = nn.Parameter(torch.ones(1, dimension, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, dimension, 1, 1))
else:
self.gain = Variable(torch.ones(1, dimension, 1, 1),
requires_grad=False)
self.bias = Variable(torch.zeros(1, dimension, 1, 1),
requires_grad=False)
def forward(self, inp):
inp.size(0)
mean = my_mean(inp)
std = torch.sqrt(my_mean((inp - mean) ** 2) + self.eps)
x = (inp - mean.expand_as(inp)) / std.expand_as(inp)
return x * self.gain.expand_as(x).type(x.type()) + self.bias.expand_as(
x).type(x.type())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Net1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lg/clgnkdclmu24adiu7cbx7ybrhsind74uypy3cvuihzwv4hxzyjlf.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s6/cs6oowloxihsa2iqf677aggor4dgxx3q4fq7upsksc3m7rf4j6xb.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 921600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3600) % 64
x0 = xindex % 3600
x4 = (xindex // 3600)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (3616*x4)), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6v/c6votzbefolf4fzzmnc4okxekuuh37ajyccnmrjo4ze3cvigljbw.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_4 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
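# Each channel row is 60 elements wide inside a padded per-channel stride of
# 3616, so the 2x2 window for output (x0, x1) loads offsets 0, 1, 60 and 61
# from base 2*x0 + 120*x1 + 3616*x2; out_ptr0 receives the int8 argmax index,
# out_ptr1 the max value.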
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = (xindex // 30) % 30
x2 = (xindex // 900)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + (2*x0) + (120*x1) + (3616*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 492032, grid=grid(492032), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 60, 60), (230400, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 64, 60, 60), (231424, 3616, 60, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_5, buf3, 921600, grid=grid(921600), stream=stream0)
del buf2
del primals_5
buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1), torch.int8)
buf5 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 230400, grid=grid(230400), stream=stream0)
return (reinterpret_tensor(buf5, (4, 57600), (57600, 1), 0), primals_1, primals_3, primals_4, buf1, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim
class Net1(nn.Module):
def __init__(self):
super(Net1, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
return x
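# Shape trace for the (4, 1, 64, 64) input used below (a sketch):
#   conv1 (3x3, stride 1, no padding): (4, 32, 62, 62) -> 492032 elements
#   conv2 (3x3, stride 1, no padding): (4, 64, 60, 60) -> 921600 elements
#   max_pool2d(2):                     (4, 64, 30, 30) -> 230400 elements
#   flatten:                           (4, 57600)
# These counts match the xnumel constants in the fused kernels above.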
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 492032
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 64
x0 = xindex % 3600
x4 = xindex // 3600
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 30
x1 = xindex // 30 % 30
x2 = xindex // 900
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 62, 62), (123008, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(492032)](buf1, primals_2,
492032, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 60, 60), (230400, 3600, 60, 1))
buf3 = empty_strided_cuda((4, 64, 60, 60), (231424, 3616, 60, 1),
torch.float32)
triton_poi_fused_convolution_relu_1[grid(921600)](buf2, primals_5,
buf3, 921600, XBLOCK=1024, num_warps=4, num_stages=1)
del buf2
del primals_5
buf4 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 64, 30, 30), (57600, 900, 30, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_2[grid(230400)](buf3, buf4,
buf5, 230400, XBLOCK=512, num_warps=8, num_stages=1)
    return (reinterpret_tensor(buf5, (4, 57600), (57600, 1), 0), primals_1,
        primals_3, primals_4, buf1, buf3, buf4)
class Net1New(nn.Module):
def __init__(self):
super(Net1New, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ringier-data/deep-learning-containers | Net1 | false | 7,547 | [
"Apache-2.0"
] | 1 | e939ceee48a426f9ae4e0b50317dc2fa8845a312 | https://github.com/ringier-data/deep-learning-containers/tree/e939ceee48a426f9ae4e0b50317dc2fa8845a312 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
import torch.optim
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
|
F_fully_convolutional | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cb/ccbgymnr2fvk43axzcuowohjalipdfn2nc4qqvidfjzuqhtxsj6g.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
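# Judging by the index math (no source mapping is recorded here), this kernel
# repacks the (256, 4, 3, 3) conv weight from contiguous into a channels-last
# (out, kh, kw, in) layout for the convolutions below.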
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (36*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/j5/cj5nf2owtsdm2zwcezqxpyn63iwddjyadpotkhm2ua52inoqxdcl.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/t6/ct6zirskpfqsqbhnjmn23xlleilffwmhs2zollktf3dfb4p7sip3.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 66560
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 260
y1 = (yindex // 260)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (260*x2) + (2340*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hg/chg7sirnnhnknz3ptn3eoozzgbzupxzm5x6senlay6ofgympgnsv.py
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x1 => gt
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
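# Only the sign mask (conv output + bias > 0) is stored here as int1; the
# actual leaky_relu scaling by 0.02 is applied later inside the fused cat
# kernels, which recompute conv + bias and select between x and 0.02 * x
# using this mask.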
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dl/cdlfh7a22exmfr3ajvsn6fluwbocwskyu3xwvsimvoduia6uozcl.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where], 1), kwargs = {})
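# Eager-mode sketch: torch.cat([x, F.leaky_relu(y, 0.02)], dim=1), where y
# stands for the first convolution's biased output; the result is written in
# channels-last order (the channel index x0 is fastest-moving).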
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 260
x1 = (xindex // 260)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 260, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((256*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + ((256*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr3 + ((-4) + x0), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.02
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vq/cvqzjkjohlrzvzpfntwnuriz72pk6hkhjyhcvmyaozp225qe7yfo.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where, %where_1], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 516
x1 = (xindex // 516)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 260, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((256*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp11 = tl.load(in_ptr2 + ((256*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr3 + ((-4) + x0), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = 0.02
tmp15 = tmp13 * tmp14
tmp16 = tl.where(tmp10, tmp13, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp9, tmp16, tmp17)
tmp19 = tmp0 >= tmp7
tmp20 = tl.full([1], 516, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tl.load(in_ptr4 + ((256*x1) + ((-260) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp23 = tl.load(in_ptr5 + ((256*x1) + ((-260) + x0)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr6 + ((-260) + x0), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp14
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp19, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp18, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6f/c6f22zofgmmauujzzpvikmba5mkauq7gnmpdgxxhjctuzlhxxl55.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_6 = async_compile.triton('triton_poi_fused_convolution_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 260, 3, 3), (2340, 9, 3, 1))
assert_size_stride(primals_5, (256, ), (1, ))
assert_size_stride(primals_6, (4, 516, 1, 1), (516, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 1024, 9, grid=grid(1024, 9), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((256, 260, 3, 3), (2340, 1, 780, 260), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 66560, 9, grid=grid(66560, 9), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf4 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf3, primals_2, buf4, 16384, grid=grid(16384), stream=stream0)
buf5 = empty_strided_cuda((4, 260, 4, 4), (4160, 1, 1040, 260), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf1, buf4, buf3, primals_2, buf5, 16640, grid=grid(16640), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf6, primals_5, buf7, 16384, grid=grid(16384), stream=stream0)
buf8 = empty_strided_cuda((4, 516, 4, 4), (8256, 1, 2064, 516), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf1, buf4, buf3, primals_2, buf7, buf6, primals_5, buf8, 33024, grid=grid(33024), stream=stream0)
del buf3
del buf6
del primals_2
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4))
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_6.run(buf9, primals_7, buf10, 16, 16, grid=grid(16, 16), stream=stream0)
del buf9
del primals_7
return (buf10, buf0, buf1, buf2, primals_6, buf4, buf5, buf7, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 260, 3, 3), (2340, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 516, 1, 1), (516, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class F_fully_convolutional(nn.Module):
def __init__(self, in_channels, out_channels, internal_size=256,
kernel_size=3, leaky_slope=0.02):
super().__init__()
pad = kernel_size // 2
self.leaky_slope = leaky_slope
self.conv1 = nn.Conv2d(in_channels, internal_size, kernel_size=
kernel_size, padding=pad)
self.conv2 = nn.Conv2d(in_channels + internal_size, internal_size,
kernel_size=kernel_size, padding=pad)
self.conv3 = nn.Conv2d(in_channels + 2 * internal_size,
out_channels, kernel_size=1, padding=0)
def forward(self, x):
x1 = F.leaky_relu(self.conv1(x), self.leaky_slope)
x2 = F.leaky_relu(self.conv2(torch.cat([x, x1], 1)), self.leaky_slope)
return self.conv3(torch.cat([x, x1, x2], 1))
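# Illustrative usage sketch (values are hypothetical, not from the original
# source). Each conv sees the input plus all previous activations,
# DenseNet-style, so conv3 maps 4 + 256 + 256 = 516 channels back out:
#   net = F_fully_convolutional(in_channels=4, out_channels=4)
#   y = net(torch.rand(4, 4, 4, 4))  # padding preserves spatial size: (4, 4, 4, 4)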
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 36 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 260
y1 = yindex // 260
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 260 * x2 + 2340 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
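    # Computes the leaky-relu gating mask: (conv_out + bias) > 0, stored as a
    # bool buffer. The raw conv output is kept separately; the cat kernels
    # below re-apply the bias and select between x and 0.02 * x via this mask.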
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
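    # Concatenates the 4 input channels (in_ptr0) with the 256 leaky-relu(conv1)
    # channels into a 260-channel buffer; the slope-0.02 leaky-relu is fused in
    # here using the precomputed boolean mask (in_ptr1), raw conv output
    # (in_ptr2), and bias (in_ptr3) rather than recomputed from scratch.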
xnumel = 16640
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 260
x1 = xindex // 260
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 260, tl.int64)
tmp9 = tl.load(in_ptr1 + (256 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (256 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tl.load(in_ptr3 + (-4 + x0), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = 0.02
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp16 = tl.full(tmp15.shape, 0.0, tmp15.dtype)
tmp17 = tl.where(tmp6, tmp15, tmp16)
tmp18 = tl.where(tmp4, tmp5, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
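    # Three-way channel concat: input (4) | leaky(conv1) (256) | leaky(conv2)
    # (256) = 516 channels, again fusing the slope-0.02 leaky-relu via the
    # stored masks (in_ptr1/in_ptr4) over the raw conv outputs plus biases.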
xnumel = 33024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 516
x1 = xindex // 516
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 260, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (256 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp11 = tl.load(in_ptr2 + (256 * x1 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.load(in_ptr3 + (-4 + x0), tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = 0.02
tmp15 = tmp13 * tmp14
tmp16 = tl.where(tmp10, tmp13, tmp15)
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp9, tmp16, tmp17)
tmp19 = tmp0 >= tmp7
tl.full([1], 516, tl.int64)
tmp22 = tl.load(in_ptr4 + (256 * x1 + (-260 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp23 = tl.load(in_ptr5 + (256 * x1 + (-260 + x0)), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr6 + (-260 + x0), tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tmp25 * tmp14
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp19, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp18, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_convolution_6(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
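    # Adds the final conv bias and permutes the channels-last intermediate
    # (strides (64, 1, 16, 4)) back into a contiguous NCHW output tensor.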
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (256, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (256, 260, 3, 3), (2340, 9, 3, 1))
assert_size_stride(primals_5, (256,), (1,))
assert_size_stride(primals_6, (4, 516, 1, 1), (516, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((256, 4, 3, 3), (36, 1, 12, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1024, 9)](primals_1, buf0, 1024, 9, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_1[grid(16, 16)](primals_3, buf1, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((256, 260, 3, 3), (2340, 1, 780, 260),
torch.float32)
triton_poi_fused_2[grid(66560, 9)](primals_4, buf2, 66560, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf4 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(16384)](buf3,
primals_2, buf4, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf5 = empty_strided_cuda((4, 260, 4, 4), (4160, 1, 1040, 260),
torch.float32)
triton_poi_fused_cat_4[grid(16640)](buf1, buf4, buf3, primals_2,
buf5, 16640, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 4, 4), (4096, 1, 1024, 256))
buf7 = empty_strided_cuda((4, 256, 4, 4), (4096, 1, 1024, 256),
torch.bool)
triton_poi_fused_convolution_leaky_relu_3[grid(16384)](buf6,
primals_5, buf7, 16384, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 516, 4, 4), (8256, 1, 2064, 516),
torch.float32)
triton_poi_fused_cat_5[grid(33024)](buf1, buf4, buf3, primals_2,
buf7, buf6, primals_5, buf8, 33024, XBLOCK=512, num_warps=4,
num_stages=1)
del buf3
del buf6
del primals_2
del primals_5
buf9 = extern_kernels.convolution(buf8, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 4, 4, 4), (64, 1, 16, 4))
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_6[grid(16, 16)](buf9, primals_7, buf10,
16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf9
del primals_7
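    # buf10 is the module output; the other returned tensors are kept alive
    # for the autograd backward pass (inductor's saved-tensor convention).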
return buf10, buf0, buf1, buf2, primals_6, buf4, buf5, buf7, buf8
class F_fully_convolutionalNew(nn.Module):
def __init__(self, in_channels, out_channels, internal_size=256,
kernel_size=3, leaky_slope=0.02):
super().__init__()
pad = kernel_size // 2
self.leaky_slope = leaky_slope
self.conv1 = nn.Conv2d(in_channels, internal_size, kernel_size=
kernel_size, padding=pad)
self.conv2 = nn.Conv2d(in_channels + internal_size, internal_size,
kernel_size=kernel_size, padding=pad)
self.conv3 = nn.Conv2d(in_channels + 2 * internal_size,
out_channels, kernel_size=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ramonpeter/LaSeR | F_fully_convolutional | false | 7,548 | [
"MIT"
] | 1 | 28daa6876256501ed0d3e84a4ddfedc7892bd528 | https://github.com/ramonpeter/LaSeR/tree/28daa6876256501ed0d3e84a4ddfedc7892bd528 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels, out_channels, internal_size=256,
kernel_size=3, leaky_slope=0.02):
super().__init__()
pad = kernel_size // 2
self.leaky_slope = leaky_slope
self.conv1 = nn.Conv2d(in_channels, internal_size, kernel_size=
kernel_size, padding=pad)
self.conv2 = nn.Conv2d(in_channels + internal_size, internal_size,
kernel_size=kernel_size, padding=pad)
self.conv3 = nn.Conv2d(in_channels + 2 * internal_size,
out_channels, kernel_size=1, padding=0)
def forward(self, x):
x1 = F.leaky_relu(self.conv1(x), self.leaky_slope)
x2 = F.leaky_relu(self.conv2(torch.cat([x, x1], 1)), self.leaky_slope)
return self.conv3(torch.cat([x, x1, x2], 1))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DepthConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwgxzf2xoodnratk5sxw5fhm27r6outnj7ah5fxpossd3i43oah.py
# Topologically Sorted Source Nodes: [prelu, sum_1, mean, sub, pow_1, sum_2, truediv_1], Original ATen: [aten._prelu_kernel, aten.sum, aten.div, aten.sub, aten.pow]
# Source node to ATen node mapping:
# mean => div
# pow_1 => pow_1
# prelu => gt, mul, where
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# truediv_1 => div_1
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [3], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %div), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [3], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, 4), kwargs = {})
triton_poi_fused__prelu_kernel_div_pow_sub_sum_1 = async_compile.triton('triton_poi_fused__prelu_kernel_div_pow_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_div_pow_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_div_pow_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tmp8 = tmp7 > tmp1
tmp9 = tmp4 * tmp7
tmp10 = tl.where(tmp8, tmp7, tmp9)
tmp11 = tmp6 + tmp10
tmp13 = tmp12 > tmp1
tmp14 = tmp4 * tmp12
tmp15 = tl.where(tmp13, tmp12, tmp14)
tmp16 = tmp11 + tmp15
tmp18 = tmp17 > tmp1
tmp19 = tmp4 * tmp17
tmp20 = tl.where(tmp18, tmp17, tmp19)
tmp21 = tmp16 + tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tmp24 = tmp6 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp10 - tmp23
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp15 - tmp23
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = tmp20 - tmp23
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp34 * tmp22
tl.store(out_ptr0 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp35, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jf/cjfzj6re4sov2j5d4vchh6pzmo6n4hte3t4emmdcco5tgidbioet.py
# Topologically Sorted Source Nodes: [prelu, sub_1, x, mul, output], Original ATen: [aten._prelu_kernel, aten.sub, aten.div, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_1
# output => add_1
# prelu => gt, mul, where
# sub_1 => sub_1
# x => div_2
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %convolution), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %expand), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_1, %expand_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_2, %expand_2), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %expand_3), kwargs = {})
triton_poi_fused__prelu_kernel_add_div_mul_sub_2 = async_compile.triton('triton_poi_fused__prelu_kernel_add_div_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_add_div_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_add_div_mul_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = (xindex // 4)
x2 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tmp8 = tmp6 - tmp7
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp8 / tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + (x4), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nd/cndqqqyycgjtsoaoj3pswjke2wmpmrwf4qdoka7tc7hsbqres2nz.py
# Topologically Sorted Source Nodes: [conv2d_1, prelu_1, sum_3, mean_1, sub_2, pow_2, sum_4, truediv_4, add_2, std_1, sub_3, x_1, mul_1, output_1], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sum, aten.div, aten.sub, aten.pow, aten.add, aten.sqrt, aten.mul]
# Source node to ATen node mapping:
# add_2 => add_2
# conv2d_1 => convolution_1
# mean_1 => div_3
# mul_1 => mul_3
# output_1 => add_3
# pow_2 => pow_2
# prelu_1 => gt_1, mul_2, where_1
# std_1 => sqrt_1
# sub_2 => sub_2
# sub_3 => sub_3
# sum_3 => sum_3
# sum_4 => sum_4
# truediv_4 => div_4
# x_1 => div_5
# Graph fragment:
# %convolution_1 : [num_users=4] = call_function[target=torch.ops.aten.convolution.default](args = (%add_1, %primals_7, %primals_8, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %convolution_1), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where_1, [3], True), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, 1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %div_3), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [3], True), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_4, 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_4, 1e-08), kwargs = {})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %expand_4), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_3, %expand_5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_5, %expand_6), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %expand_7), kwargs = {})
triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3 = async_compile.triton('triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp18 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp9
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp11 / tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vb/cvbsvlw6fkwmwaktur7n3jsrcplxs2mytugr2p3372pgohe2ykab.py
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# output_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_3, %primals_12, %primals_13, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_4 = async_compile.triton('triton_poi_fused_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, ), (1, ))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [prelu, sum_1, mean, sub, pow_1, sum_2, truediv_1], Original ATen: [aten._prelu_kernel, aten.sum, aten.div, aten.sub, aten.pow]
triton_poi_fused__prelu_kernel_div_pow_sub_sum_1.run(buf1, primals_4, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prelu, sub_1, x, mul, output], Original ATen: [aten._prelu_kernel, aten.sub, aten.div, aten.mul, aten.add]
triton_poi_fused__prelu_kernel_add_div_mul_sub_2.run(buf1, primals_4, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_6
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, prelu_1, sum_3, mean_1, sub_2, pow_2, sum_4, truediv_4, add_2, std_1, sub_3, x_1, mul_1, output_1], Original ATen: [aten.convolution, aten._prelu_kernel, aten.sum, aten.div, aten.sub, aten.pow, aten.add, aten.sqrt, aten.mul]
triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3.run(buf6, primals_8, primals_9, primals_10, primals_11, buf7, 16, grid=grid(16), stream=stream0)
del primals_11
del primals_8
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1, 1), (4, 1, 1, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_4.run(buf9, primals_13, 16, grid=grid(16), stream=stream0)
del primals_13
return (buf9, primals_1, primals_3, primals_4, primals_5, primals_7, primals_9, primals_10, primals_12, buf1, buf4, buf6, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
class tLN(nn.Module):
def __init__(self, dimension, eps=1e-08, trainable=True):
super(tLN, self).__init__()
self.eps = eps
if trainable:
self.gain = nn.Parameter(torch.ones(1, dimension, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, dimension, 1, 1))
else:
self.gain = Variable(torch.ones(1, dimension, 1, 1),
requires_grad=False)
self.bias = Variable(torch.zeros(1, dimension, 1, 1),
requires_grad=False)
def forward(self, inp):
        inp.size(0)  # no-op; leftover from the original implementation
mean = torch.sum(inp, 3, keepdim=True) / inp.shape[3]
std = torch.sqrt(torch.sum((inp - mean) ** 2, 3, keepdim=True) /
inp.shape[3] + self.eps)
x = (inp - mean.expand_as(inp)) / std.expand_as(inp)
return x * self.gain.expand_as(x).type(x.type()) + self.bias.expand_as(
x).type(x.type())
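# tLN ("temporal" layer norm) normalizes over the last (width/time) axis only.
# As a sketch, with W = inp.shape[3] and gain/bias broadcast from (1, C, 1, 1):
#   out = (inp - mean_W(inp)) / sqrt(mean_W((inp - mean_W(inp))**2) + eps) * gain + bias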
class CausalConv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1
), dilation=(1, 1), groups=1, bias=True):
_pad = int(np.log2((kernel_size[1] - 1) / 2))
padding_2 = int(2 ** (np.log2(dilation[1]) + _pad))
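        # padding_2 simplifies to dilation[1] * 2**_pad, i.e. approximately
        # dilation[1] * (kernel_size[1] - 1) / 2 when (kernel_size[1] - 1) / 2
        # is a power of two -- "same"-style padding on the width axis.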
self.__padding = (kernel_size[0] - 1) * dilation[0], padding_2
super(CausalConv2d, self).__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=self.__padding,
dilation=dilation, groups=groups, bias=bias)
def forward(self, input):
result = super(CausalConv2d, self).forward(input)
if self.__padding[0] != 0:
return result[:, :, :-self.__padding[0]]
return result
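# The base Conv2d pads both sides of the time axis by (kernel_size[0] - 1) *
# dilation[0]; trimming that many trailing steps above means each output
# position depends only on current and past inputs, making the conv causal.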
class DepthConv2d(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, dilation=(1,
1), stride=(1, 1), padding=(0, 0), causal=False):
super(DepthConv2d, self).__init__()
self.padding = padding
self.linear = nn.Conv2d(input_channel, hidden_channel, (1, 1))
if causal:
self.conv1d = CausalConv2d(hidden_channel, hidden_channel,
kernel, stride=stride, dilation=dilation)
else:
self.conv1d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
stride=stride, padding=self.padding, dilation=dilation)
self.BN = nn.Conv2d(hidden_channel, input_channel, (1, 1))
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = tLN(hidden_channel)
self.reg2 = tLN(hidden_channel)
def forward(self, input):
output = self.reg1(self.nonlinearity1(self.linear(input)))
output = self.reg2(self.nonlinearity2(self.conv1d(output)))
output = self.BN(output)
return output
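# Pipeline: 1x1 conv -> PReLU -> tLN -> KxK (optionally causal) conv -> PReLU
# -> tLN -> 1x1 conv back to input_channel. Note that self.BN is a plain 1x1
# Conv2d projection, not batch normalization, despite the name.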
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_channel': 4, 'hidden_channel': 4, 'kernel': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_div_pow_sub_sum_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
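    # Fuses the shared single-weight PReLU with a width-4 row reduction:
    # writes the per-row mean to out_ptr0 and the biased variance (sum of
    # squared deviations / 4) to out_ptr1, unrolled over the last axis.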
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tmp8 = tmp7 > tmp1
tmp9 = tmp4 * tmp7
tmp10 = tl.where(tmp8, tmp7, tmp9)
tmp11 = tmp6 + tmp10
tmp13 = tmp12 > tmp1
tmp14 = tmp4 * tmp12
tmp15 = tl.where(tmp13, tmp12, tmp14)
tmp16 = tmp11 + tmp15
tmp18 = tmp17 > tmp1
tmp19 = tmp4 * tmp17
tmp20 = tl.where(tmp18, tmp17, tmp19)
tmp21 = tmp16 + tmp20
tmp22 = 0.25
tmp23 = tmp21 * tmp22
tmp24 = tmp6 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp10 - tmp23
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp15 - tmp23
tmp30 = tmp29 * tmp29
tmp31 = tmp28 + tmp30
tmp32 = tmp20 - tmp23
tmp33 = tmp32 * tmp32
tmp34 = tmp31 + tmp33
tmp35 = tmp34 * tmp22
tl.store(out_ptr0 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_add_div_mul_sub_2(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x5 = xindex // 4
x2 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp5 = tmp4 * tmp0
tmp6 = tl.where(tmp2, tmp0, tmp5)
tmp8 = tmp6 - tmp7
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp8 / tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tl.store(out_ptr0 + x4, tmp17, xmask)
@triton.jit
def triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
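    # Width-1 special case: conv1d here is a 4x4 conv on a 4x4 map with no
    # padding, so the spatial output is 1x1. The per-width mean then equals
    # the value itself (tmp11 == 0 exactly), the normalized term vanishes,
    # and the stored result reduces to the tLN bias (in_ptr3) per channel.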
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp18 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp7 = tmp6 * tmp2
tmp8 = tl.where(tmp4, tmp2, tmp7)
tmp9 = 1.0
tmp10 = tmp8 * tmp9
tmp11 = tmp8 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp9
tmp14 = 1e-08
tmp15 = tmp13 + tmp14
tmp16 = libdevice.sqrt(tmp15)
tmp17 = tmp11 / tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 + tmp20
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp21, xmask)
@triton.jit
def triton_poi_fused_convolution_4(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1,), (1,))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__prelu_kernel_div_pow_sub_sum_1[grid(64)](buf1,
primals_4, buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__prelu_kernel_add_div_mul_sub_2[grid(256)](buf1,
primals_4, buf2, buf3, primals_5, primals_6, buf4, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_6
buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused__prelu_kernel_add_convolution_div_mul_pow_sqrt_sub_sum_3[
grid(16)](buf6, primals_8, primals_9, primals_10, primals_11,
buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
del primals_8
buf8 = extern_kernels.convolution(buf7, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 1, 1), (4, 1, 1, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_4[grid(16)](buf9, primals_13, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf9, primals_1, primals_3, primals_4, primals_5, primals_7,
primals_9, primals_10, primals_12, buf1, buf4, buf6, buf7)
class tLN(nn.Module):
def __init__(self, dimension, eps=1e-08, trainable=True):
super(tLN, self).__init__()
self.eps = eps
if trainable:
self.gain = nn.Parameter(torch.ones(1, dimension, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, dimension, 1, 1))
else:
self.gain = Variable(torch.ones(1, dimension, 1, 1),
requires_grad=False)
self.bias = Variable(torch.zeros(1, dimension, 1, 1),
requires_grad=False)
def forward(self, inp):
        inp.size(0)  # no-op; leftover from the original implementation
mean = torch.sum(inp, 3, keepdim=True) / inp.shape[3]
std = torch.sqrt(torch.sum((inp - mean) ** 2, 3, keepdim=True) /
inp.shape[3] + self.eps)
x = (inp - mean.expand_as(inp)) / std.expand_as(inp)
return x * self.gain.expand_as(x).type(x.type()) + self.bias.expand_as(
x).type(x.type())
class CausalConv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1
), dilation=(1, 1), groups=1, bias=True):
_pad = int(np.log2((kernel_size[1] - 1) / 2))
padding_2 = int(2 ** (np.log2(dilation[1]) + _pad))
self.__padding = (kernel_size[0] - 1) * dilation[0], padding_2
super(CausalConv2d, self).__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=self.__padding,
dilation=dilation, groups=groups, bias=bias)
def forward(self, input):
result = super(CausalConv2d, self).forward(input)
if self.__padding[0] != 0:
return result[:, :, :-self.__padding[0]]
return result
class DepthConv2dNew(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, dilation=(1,
1), stride=(1, 1), padding=(0, 0), causal=False):
super(DepthConv2dNew, self).__init__()
self.padding = padding
self.linear = nn.Conv2d(input_channel, hidden_channel, (1, 1))
if causal:
self.conv1d = CausalConv2d(hidden_channel, hidden_channel,
kernel, stride=stride, dilation=dilation)
else:
self.conv1d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
stride=stride, padding=self.padding, dilation=dilation)
self.BN = nn.Conv2d(hidden_channel, input_channel, (1, 1))
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = tLN(hidden_channel)
self.reg2 = tLN(hidden_channel)
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = self.conv1d.weight
primals_8 = self.conv1d.bias
primals_12 = self.BN.weight
primals_13 = self.BN.bias
primals_4 = self.nonlinearity1.weight
primals_9 = self.nonlinearity2.weight
primals_5 = self.reg1.gain
primals_6 = self.reg1.bias
primals_10 = self.reg2.gain
primals_11 = self.reg2.bias
primals_7 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| rbodo/pytorch-OpCounter | DepthConv2d | false | 7,549 | [
"MIT"
] | 1 | 1857cbb5f9e53343fb349af84efdfde2554a2691 | https://github.com/rbodo/pytorch-OpCounter/tree/1857cbb5f9e53343fb349af84efdfde2554a2691 | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
class tLN(nn.Module):
def __init__(self, dimension, eps=1e-08, trainable=True):
super().__init__()
self.eps = eps
if trainable:
self.gain = nn.Parameter(torch.ones(1, dimension, 1, 1))
self.bias = nn.Parameter(torch.zeros(1, dimension, 1, 1))
else:
self.gain = Variable(torch.ones(1, dimension, 1, 1),
requires_grad=False)
self.bias = Variable(torch.zeros(1, dimension, 1, 1),
requires_grad=False)
def forward(self, inp):
mean = torch.sum(inp, 3, keepdim=True) / inp.shape[3]
std = torch.sqrt(torch.sum((inp - mean) ** 2, 3, keepdim=True) /
inp.shape[3] + self.eps)
x = (inp - mean.expand_as(inp)) / std.expand_as(inp)
return x * self.gain.expand_as(x).type(x.type()) + self.bias.expand_as(
x).type(x.type())
class CausalConv2d(torch.nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=(1, 1
), dilation=(1, 1), groups=1, bias=True):
_pad = int(np.log2((kernel_size[1] - 1) / 2))
padding_2 = int(2 ** (np.log2(dilation[1]) + _pad))
self.__padding = (kernel_size[0] - 1) * dilation[0], padding_2
super().__init__(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=self.__padding,
dilation=dilation, groups=groups, bias=bias)
def forward(self, input):
result = super(CausalConv2d, self).forward(input)
if self.__padding[0] != 0:
return result[:, :, :-self.__padding[0]]
return result
class Model(nn.Module):
def __init__(self, input_channel, hidden_channel, kernel, dilation=(1,
1), stride=(1, 1), padding=(0, 0), causal=False):
super().__init__()
self.padding = padding
self.linear = nn.Conv2d(input_channel, hidden_channel, (1, 1))
if causal:
self.conv1d = CausalConv2d(hidden_channel, hidden_channel,
kernel, stride=stride, dilation=dilation)
else:
self.conv1d = nn.Conv2d(hidden_channel, hidden_channel, kernel,
stride=stride, padding=self.padding, dilation=dilation)
self.BN = nn.Conv2d(hidden_channel, input_channel, (1, 1))
self.nonlinearity1 = nn.PReLU()
self.nonlinearity2 = nn.PReLU()
self.reg1 = tLN(hidden_channel)
self.reg2 = tLN(hidden_channel)
def forward(self, input):
output = self.reg1(self.nonlinearity1(self.linear(input)))
output = self.reg2(self.nonlinearity2(self.conv1d(output)))
output = self.BN(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
PrimaryCapsules | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lc/clc5ohchngeokcnvys542wmjet6kyvc7ykrfjeokvnuux2fzcenw.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2l/c2lbx6v56feb5jh4iyjmcfle7wev52bxzxcxnen4qqq552qtoya6.py
# Topologically Sorted Source Nodes: [pow_1, squared_norm, add, truediv, mul, sqrt, add_1, truediv_1], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# mul => mul
# pow_1 => pow_1
# sqrt => sqrt
# squared_norm => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {})
# %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %view_1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_1 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 28, 28), (3136, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 12544, grid=grid(12544), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 784, 4), (3136, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, squared_norm, add, truediv, mul, sqrt, add_1, truediv_1], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.mul, aten.sqrt]
triton_poi_fused_add_div_mul_pow_sqrt_sum_1.run(buf1, buf2, 12544, grid=grid(12544), stream=stream0)
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 9, 9), (324, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def squash(s, dim=-1):
"""
"Squashing" non-linearity that shrunks short vectors to almost zero length and long vectors to a length slightly below 1
Eq. (1): v_j = ||s_j||^2 / (1 + ||s_j||^2) * s_j / ||s_j||
Args:
s: Vector before activation
dim: Dimension along which to calculate the norm
Returns:
Squashed vector
"""
squared_norm = torch.sum(s ** 2, dim=dim, keepdim=True)
return squared_norm / (1 + squared_norm) * s / (torch.sqrt(squared_norm
) + 1e-08)
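# A minimal usage sketch (added for illustration): squash preserves each
# vector's direction and maps its length strictly below 1, per Eq. (1).
def _squash_example():
    s = torch.rand(4, 784, 4)
    v = squash(s)
    assert bool((v.norm(dim=-1) < 1).all())
    return v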
class PrimaryCapsules(nn.Module):
def __init__(self, in_channels, out_channels, dim_caps, kernel_size=9,
stride=2, padding=0):
"""
Initialize the layer.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
dim_caps: Dimensionality, i.e. length, of the output capsule vector.
"""
super(PrimaryCapsules, self).__init__()
self.dim_caps = dim_caps
self._caps_channel = int(out_channels / dim_caps)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding)
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), self._caps_channel, out.size(2), out.
size(3), self.dim_caps)
out = out.view(out.size(0), -1, self.dim_caps)
return squash(out)
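# Shape note (added for illustration): with the 4x4x64x64 input below, the
# 9x9 stride-2 conv yields (4, 4, 28, 28); the two views regroup this into
# (4, 784, 4), i.e. 784 capsules of dimension dim_caps=4, before squashing.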
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'dim_caps': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
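# The kernel below fuses the squash non-linearity of Eq. (1): per 4-dim
# capsule it accumulates the squared norm n^2 (tmp10), scales the input by
# n^2 / (1 + n^2), and divides by (sqrt(n^2) + 1e-08).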
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12544
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 1.0
tmp12 = tmp10 + tmp11
tmp13 = tmp10 / tmp12
tmp15 = tmp13 * tmp14
tmp16 = libdevice.sqrt(tmp10)
tmp17 = 1e-08
tmp18 = tmp16 + tmp17
tmp19 = tmp15 / tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 28, 28), (3136, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(12544)](buf1, primals_2, 12544,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 784, 4), (3136, 4, 1), torch.float32)
triton_poi_fused_add_div_mul_pow_sqrt_sum_1[grid(12544)](buf1, buf2,
12544, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_3, buf1
def squash(s, dim=-1):
"""
"Squashing" non-linearity that shrunks short vectors to almost zero length and long vectors to a length slightly below 1
Eq. (1): v_j = ||s_j||^2 / (1 + ||s_j||^2) * s_j / ||s_j||
Args:
s: Vector before activation
dim: Dimension along which to calculate the norm
Returns:
Squashed vector
"""
squared_norm = torch.sum(s ** 2, dim=dim, keepdim=True)
return squared_norm / (1 + squared_norm) * s / (torch.sqrt(squared_norm
) + 1e-08)
class PrimaryCapsulesNew(nn.Module):
def __init__(self, in_channels, out_channels, dim_caps, kernel_size=9,
stride=2, padding=0):
"""
Initialize the layer.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
dim_caps: Dimensionality, i.e. length, of the output capsule vector.
"""
super(PrimaryCapsulesNew, self).__init__()
self.dim_caps = dim_caps
self._caps_channel = int(out_channels / dim_caps)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| richardsun-voyager/capsule-network | PrimaryCapsules | false | 7,550 | [
"MIT"
] | 1 | 349cec1caa9ab95ff4b3333c33d04b1bdb442f67 | https://github.com/richardsun-voyager/capsule-network/tree/349cec1caa9ab95ff4b3333c33d04b1bdb442f67 | import torch
import torch.nn as nn
def squash(s, dim=-1):
"""
"Squashing" non-linearity that shrunks short vectors to almost zero length and long vectors to a length slightly below 1
Eq. (1): v_j = ||s_j||^2 / (1 + ||s_j||^2) * s_j / ||s_j||
Args:
s: Vector before activation
dim: Dimension along which to calculate the norm
Returns:
Squashed vector
"""
squared_norm = torch.sum(s ** 2, dim=dim, keepdim=True)
return squared_norm / (1 + squared_norm) * s / (torch.sqrt(squared_norm
) + 1e-08)
class Model(nn.Module):
def __init__(self, in_channels, out_channels, dim_caps, kernel_size=9,
stride=2, padding=0):
"""
Initialize the layer.
Args:
in_channels: Number of input channels.
out_channels: Number of output channels.
dim_caps: Dimensionality, i.e. length, of the output capsule vector.
"""
super().__init__()
self.dim_caps = dim_caps
self._caps_channel = int(out_channels / dim_caps)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding)
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), self._caps_channel, out.size(2), out.
size(3), self.dim_caps)
out = out.view(out.size(0), -1, self.dim_caps)
return squash(out)
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4, 4, 4]
|
ChannelAttentionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [X], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# X => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [X], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# X => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ei/ceid4re34lmuazxtcmv2cljn2v2emu56yzgps5z74exnddnidgi3.py
# Topologically Sorted Source Nodes: [mul, E], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# E => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [X], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [X], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
del buf1
buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf3)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, E], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_2.run(primals_2, buf3, primals_1, buf4, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ChannelAttentionModule(nn.Module):
def __init__(self):
super(ChannelAttentionModule, self).__init__()
self.beta = nn.Parameter(torch.zeros(1), requires_grad=True)
def forward(self, A):
batchsize, num_channels, height, width = A.shape
N = height * width
A1 = A.view((batchsize, num_channels, N))
X = F.softmax(torch.bmm(A1, A1.permute(0, 2, 1)), dim=-1)
XA1 = torch.bmm(X.permute(0, 2, 1), A1).view((batchsize,
num_channels, height, width))
E = self.beta * XA1 + A
return E
def initialize_weights(self):
nn.init.constant_(self.beta.data, 0.0)
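# A shape walkthrough sketch (added for illustration, using the 4x4x4x4
# input from get_inputs below): A1 is (B, C, N) = (4, 4, 16), X =
# softmax(A1 @ A1^T) is a (4, 4, 4) channel-affinity map, and X^T @ A1
# folds back to (4, 4, 4, 4) before the beta-weighted residual add.
def _cam_shape_check():
    out = ChannelAttentionModule()(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)
    return out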
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
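# The kernel below fuses the residual update E = beta * XA1 + A: the scalar
# beta (in_ptr0) is broadcast and multiplied into XA1 (in_ptr1), then A
# (in_ptr2) is added elementwise.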
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64,
16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1,
16), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
buf3 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4),
0), reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0),
out=buf3)
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(256)](primals_2, buf3, primals_1,
buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf4, buf3
class ChannelAttentionModuleNew(nn.Module):
def __init__(self):
super(ChannelAttentionModuleNew, self).__init__()
self.beta = nn.Parameter(torch.zeros(1), requires_grad=True)
def initialize_weights(self):
nn.init.constant_(self.beta.data, 0.0)
def forward(self, input_0):
primals_2 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | ChannelAttentionModule | false | 7,551 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.beta = nn.Parameter(torch.zeros(1), requires_grad=True)
def forward(self, A):
batchsize, num_channels, height, width = A.shape
N = height * width
A1 = A.view((batchsize, num_channels, N))
X = F.softmax(torch.bmm(A1, A1.permute(0, 2, 1)), dim=-1)
XA1 = torch.bmm(X.permute(0, 2, 1), A1).view((batchsize,
num_channels, height, width))
E = self.beta * XA1 + A
return E
def initialize_weights(self):
nn.init.constant_(self.beta.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Concat2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %slice_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Concat2d(nn.Module):
def __init__(self):
super(Concat2d, self).__init__()
def forward(self, x_down, x_enc):
if x_down.shape[-1] > x_enc.shape[-1]:
p = (x_down.shape[-1] - x_enc.shape[-1]) // 2
if (x_down.shape[-1] - x_enc.shape[-1]) % 2 != 0:
p += 1
x_enc = F.pad(x_enc, (p, p, p, p))
start = [(x_enc.shape[-2] - x_down.shape[-2]) // 2, (x_enc.shape[-1
] - x_down.shape[-1]) // 2]
length = [x_down.shape[-2], x_down.shape[-1]]
crop = torch.narrow(torch.narrow(x_enc, dim=2, start=start[0],
length=length[0]), dim=3, start=start[1], length=length[1])
cat = torch.cat(tensors=(x_down, crop), dim=1)
return cat
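# A minimal shape sketch (added for illustration): with a larger (4, 4, 8, 8)
# encoder map, the center 4x4 region is cropped and concatenated with x_down
# along the channel axis.
def _concat2d_example():
    out = Concat2d()(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 8, 8))
    assert out.shape == (4, 8, 4, 4)
    return out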
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class Concat2dNew(nn.Module):
def __init__(self):
super(Concat2dNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | Concat2d | false | 7,552 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x_down, x_enc):
if x_down.shape[-1] > x_enc.shape[-1]:
p = (x_down.shape[-1] - x_enc.shape[-1]) // 2
if (x_down.shape[-1] - x_enc.shape[-1]) % 2 != 0:
p += 1
x_enc = F.pad(x_enc, (p, p, p, p))
start = [(x_enc.shape[-2] - x_down.shape[-2]) // 2, (x_enc.shape[-1
] - x_down.shape[-1]) // 2]
length = [x_down.shape[-2], x_down.shape[-1]]
crop = torch.narrow(torch.narrow(x_enc, dim=2, start=start[0],
length=length[0]), dim=3, start=start[1], length=length[1])
cat = torch.cat(tensors=(x_down, crop), dim=1)
return cat
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ia/ciaqxcwk4kpij3cqfsh3kugwj4vb2k4au4dshwoxpl4qlvjwwxrz.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# out_1 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=7] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aw/cawr6lnrfyvw4oaprxasgpeu3nqbaqfype73ukp7ovs3mthdbvhv.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# out_1 => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 3), kwargs = {})
triton_poi_fused_add_clamp_1 = async_compile.triton('triton_poi_fused_add_clamp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ex/cexxcsfzym3tnbdsltmqd73tdjfjaza7zxlxaz7iyzvcp4xkk2za.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# out_1 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_1, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (2,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 2.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=5] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + (x0), tmp14, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nv/cnvfvayghdoicvmmoqduxx5rpguwh4mnzhcq5u3wekthrifcqav3.py
# Topologically Sorted Source Nodes: [conv2d, out, out_1, x], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d => convolution
# out => gt, mul, where
# out_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_3, mul_4, mul_5, sub_3, sub_4, sub_6
# x => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_10, mul_11, mul_9, sub_10, sub_11, sub_13
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_3), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_4), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_2), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_9), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_2), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_10), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_3), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_11), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: '*i64', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*fp32', 8: '*i64', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x5 = (xindex // 4)
x2 = (xindex // 4) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tmp51 = tl.load(in_ptr8 + (tmp8 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr8 + (tmp20 + (4*tmp4) + (16*x5)), xmask, eviction_policy='evict_last')
tmp53 = tmp52 - tmp51
tmp54 = tmp53 * tmp27
tmp55 = tmp51 + tmp54
tmp56 = tl.load(in_ptr8 + (tmp8 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr8 + (tmp20 + (4*tmp33) + (16*x5)), xmask, eviction_policy='evict_last')
tmp58 = tmp57 - tmp56
tmp59 = tmp58 * tmp27
tmp60 = tmp56 + tmp59
tmp61 = tmp60 - tmp55
tmp62 = tmp61 * tmp48
tmp63 = tmp55 + tmp62
tl.store(in_out_ptr0 + (x4), tmp50, xmask)
tl.store(in_out_ptr1 + (x4), tmp63, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5q/c5qrop55suvpqojfx24xrvjlodhijl7tsdqplzshgs2ylmmfkwz4.py
# Topologically Sorted Source Nodes: [conv2d_1, out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_2 => gt_1, mul_6, where_1
# out_3 => add_14
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_6), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %convolution_2), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp7 > tmp3
tl.store(in_out_ptr0 + (x3), tmp9, xmask)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/s5/cs5o65bd5qls62jyofgsrvlsxw6ku55z7wudf4ziucds3lbj4c7j.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# conv2d => convolution
# out => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=5] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(buf1, 2, grid=grid(2), stream=stream0)
buf2 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_1.run(buf2, 2, grid=grid(2), stream=stream0)
buf3 = empty_strided_cuda((2, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_0.run(buf3, 2, grid=grid(2), stream=stream0)
buf4 = empty_strided_cuda((2, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_1.run(buf4, 2, grid=grid(2), stream=stream0)
buf5 = empty_strided_cuda((2, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2.run(buf5, 2, grid=grid(2), stream=stream0)
buf7 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2.run(buf7, 2, grid=grid(2), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf9 = buf8; del buf8 # reuse
buf11 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d, out, out_1, x], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3.run(buf9, buf12, buf1, buf3, buf0, primals_2, buf4, buf5, buf2, buf7, primals_3, 64, grid=grid(64), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1))
# Topologically Sorted Source Nodes: [skip], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 2, 2), (16, 4, 2, 1))
buf14 = buf13; del buf13 # reuse
buf15 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, out_2, out_3], Original ATen: [aten.convolution, aten.leaky_relu, aten.add, aten.leaky_relu_backward]
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4.run(buf14, buf10, primals_5, buf15, 64, grid=grid(64), stream=stream0)
del buf10
del primals_5
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5.run(buf0, primals_2, buf16, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf3, buf4, buf5, buf7, buf9, buf12, buf15, buf16, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn import functional as F
class ResBlock(nn.Module):
"""Residual block with bilinear upsampling/downsampling.
Args:
in_channels (int): Channel number of the input.
out_channels (int): Channel number of the output.
mode (str): Upsampling/downsampling mode. Options: down | up. Default: down.
"""
def __init__(self, in_channels, out_channels, mode='down'):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False)
if mode == 'down':
self.scale_factor = 0.5
elif mode == 'up':
self.scale_factor = 2
def forward(self, x):
out = F.leaky_relu_(self.conv1(x), negative_slope=0.2)
out = F.interpolate(out, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
out = F.leaky_relu_(self.conv2(out), negative_slope=0.2)
x = F.interpolate(x, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
skip = self.skip(x)
out = out + skip
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
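    # Bilinear source index for the 4 -> 2 downsample (align_corners=False):
    # floor((dst + 0.5) * 2 - 0.5) clamped at 0, i.e. the top/left neighbor.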
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tl.store(out_ptr0 + x0, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_clamp_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
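    # Bottom/right neighbor index: the floor index above plus one, clamped to
    # the last valid source row/column (3).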
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tl.full([1], 1, tl.int64)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
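    # Interpolation weight: the fractional part of the bilinear source
    # coordinate, clamped to [0, 1], used to lerp between the two neighbors.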
xnumel = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 2.0
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp8.to(tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 - tmp10
tmp12 = triton_helpers.maximum(tmp11, tmp7)
tmp13 = 1.0
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tl.store(out_ptr0 + x0, tmp14, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
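    # Fused epilogue: adds the conv1 bias and applies leaky ReLU (slope 0.2)
    # on the fly, then bilinearly resamples both the activated conv output
    # (-> in_out_ptr0) and the raw input (-> in_out_ptr1) from 4x4 to 2x2
    # using the precomputed neighbor indices and lerp weights.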
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x5 = xindex // 4
x2 = xindex // 4 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp48 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 + tmp10
tmp12 = 0.0
tmp13 = tmp11 > tmp12
tmp14 = 0.2
tmp15 = tmp11 * tmp14
tmp16 = tl.where(tmp13, tmp11, tmp15)
tmp18 = tmp17 + tmp1
tmp19 = tmp17 < 0
tmp20 = tl.where(tmp19, tmp18, tmp17)
tmp21 = tl.load(in_ptr2 + (tmp20 + 4 * tmp4 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp10
tmp23 = tmp22 > tmp12
tmp24 = tmp22 * tmp14
tmp25 = tl.where(tmp23, tmp22, tmp24)
tmp26 = tmp25 - tmp16
tmp28 = tmp26 * tmp27
tmp29 = tmp16 + tmp28
tmp31 = tmp30 + tmp1
tmp32 = tmp30 < 0
tmp33 = tl.where(tmp32, tmp31, tmp30)
tmp34 = tl.load(in_ptr2 + (tmp8 + 4 * tmp33 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp35 = tmp34 + tmp10
tmp36 = tmp35 > tmp12
tmp37 = tmp35 * tmp14
tmp38 = tl.where(tmp36, tmp35, tmp37)
tmp39 = tl.load(in_ptr2 + (tmp20 + 4 * tmp33 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp10
tmp41 = tmp40 > tmp12
tmp42 = tmp40 * tmp14
tmp43 = tl.where(tmp41, tmp40, tmp42)
tmp44 = tmp43 - tmp38
tmp45 = tmp44 * tmp27
tmp46 = tmp38 + tmp45
tmp47 = tmp46 - tmp29
tmp49 = tmp47 * tmp48
tmp50 = tmp29 + tmp49
tmp51 = tl.load(in_ptr8 + (tmp8 + 4 * tmp4 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp52 = tl.load(in_ptr8 + (tmp20 + 4 * tmp4 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp53 = tmp52 - tmp51
tmp54 = tmp53 * tmp27
tmp55 = tmp51 + tmp54
tmp56 = tl.load(in_ptr8 + (tmp8 + 4 * tmp33 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp57 = tl.load(in_ptr8 + (tmp20 + 4 * tmp33 + 16 * x5), xmask,
eviction_policy='evict_last')
tmp58 = tmp57 - tmp56
tmp59 = tmp58 * tmp27
tmp60 = tmp56 + tmp59
tmp61 = tmp60 - tmp55
tmp62 = tmp61 * tmp48
tmp63 = tmp55 + tmp62
tl.store(in_out_ptr0 + x4, tmp50, xmask)
tl.store(in_out_ptr1 + x4, tmp63, xmask)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4(
in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
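    # Adds the conv2 bias, applies leaky ReLU, sums the result with the
    # skip-conv output held in in_out_ptr0, and stores the >0 mask that the
    # leaky ReLU backward pass needs.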
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp9 = tmp7 + tmp8
tmp10 = tmp7 > tmp3
tl.store(in_out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5(in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
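    # Recomputes conv1 bias-add + leaky ReLU at the full 4x4 resolution solely
    # to materialize the boolean >0 mask saved for the backward pass.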
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
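    # Compiled forward: conv1, conv2 and the 1x1 skip conv run as extern ATen
    # convolutions; the small kernels build the bilinear indices/weights, and
    # the fused kernels handle activation, resampling, and the residual add.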
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(2)](buf1, 2, XBLOCK=2, num_warps=1,
num_stages=1)
buf2 = empty_strided_cuda((2, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_1[grid(2)](buf2, 2, XBLOCK=2, num_warps=
1, num_stages=1)
buf3 = empty_strided_cuda((2,), (1,), torch.int64)
triton_poi_fused__to_copy_0[grid(2)](buf3, 2, XBLOCK=2, num_warps=1,
num_stages=1)
buf4 = empty_strided_cuda((2,), (1,), torch.int64)
triton_poi_fused_add_clamp_1[grid(2)](buf4, 2, XBLOCK=2, num_warps=
1, num_stages=1)
buf5 = empty_strided_cuda((2,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(2)](buf5,
2, XBLOCK=2, num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_2[grid(2)](buf7,
2, XBLOCK=2, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf9 = buf8
del buf8
buf11 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
buf12 = buf11
del buf11
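        # buf9/buf12 alias buf8/buf11; the fused kernel below fills them with
        # the 2x2 resampled activation and the 2x2 resampled input.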
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_3[
grid(64)](buf9, buf12, buf1, buf3, buf0, primals_2, buf4, buf5,
buf2, buf7, primals_3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 2, 2), (16, 4, 2, 1))
buf13 = extern_kernels.convolution(buf12, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 4, 2, 2), (16, 4, 2, 1))
buf14 = buf13
del buf13
buf15 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.bool)
triton_poi_fused_add_convolution_leaky_relu_leaky_relu_backward_4[grid
(64)](buf14, buf10, primals_5, buf15, 64, XBLOCK=64, num_warps=
1, num_stages=1)
del buf10
del primals_5
buf16 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_5[grid(256)
](buf0, primals_2, buf16, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf0
del primals_2
return (buf14, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf3, buf4, buf5, buf7, buf9, buf12, buf15, buf16)
class ResBlockNew(nn.Module):
"""Residual block with bilinear upsampling/downsampling.
Args:
in_channels (int): Channel number of the input.
out_channels (int): Channel number of the output.
mode (str): Upsampling/downsampling mode. Options: down | up. Default: down.
"""
def __init__(self, in_channels, out_channels, mode='down'):
super(ResBlockNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False)
if mode == 'down':
self.scale_factor = 0.5
elif mode == 'up':
self.scale_factor = 2
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.skip.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| rawandahmad698/GFPGAN | ResBlock | false | 7,553 | [
"BSD-3-Clause"
] | 1 | 4700bf1a94ec9c36746f660db19f4f03e0eed9b0 | https://github.com/rawandahmad698/GFPGAN/tree/4700bf1a94ec9c36746f660db19f4f03e0eed9b0 | import torch
import torch.nn as nn
from torch.nn import functional as F
class Model(nn.Module):
"""Residual block with bilinear upsampling/downsampling.
Args:
in_channels (int): Channel number of the input.
out_channels (int): Channel number of the output.
mode (str): Upsampling/downsampling mode. Options: down | up. Default: down.
"""
def __init__(self, in_channels, out_channels, mode='down'):
super().__init__()
self.conv1 = nn.Conv2d(in_channels, in_channels, 3, 1, 1)
self.conv2 = nn.Conv2d(in_channels, out_channels, 3, 1, 1)
self.skip = nn.Conv2d(in_channels, out_channels, 1, bias=False)
if mode == 'down':
self.scale_factor = 0.5
elif mode == 'up':
self.scale_factor = 2
def forward(self, x):
out = F.leaky_relu_(self.conv1(x), negative_slope=0.2)
out = F.interpolate(out, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
out = F.leaky_relu_(self.conv2(out), negative_slope=0.2)
x = F.interpolate(x, scale_factor=self.scale_factor, mode=
'bilinear', align_corners=False)
skip = self.skip(x)
out = out + skip
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CapsuleLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/45/c45cx2lxqh23a4cwusnudd3symnnclztp6rmlej5jbbq2dwne5tl.py
# Topologically Sorted Source Nodes: [sub, relu, pow_1, mul, sub_1, mul_1, sub_2, relu_1, pow_2, mul_2, L_k, L_k_1, margin_loss], Original ATen: [aten.rsub, aten.relu, aten.pow, aten.mul, aten.sub, aten.add, aten.sum]
# Source node to ATen node mapping:
# L_k => add
# L_k_1 => sum_1
# margin_loss => sum_2
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# pow_2 => pow_2
# relu => relu
# relu_1 => relu_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (0.9, %arg0_1), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub,), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %pow_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 0.5), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_2,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu_1, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %pow_2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%add, [1]), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp18 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp19 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp32 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp33 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp46 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp47 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1, 1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp5 * tmp5
tmp7 = tmp0 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp0
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.1
tmp13 = tmp1 - tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp11 * tmp15
tmp17 = tmp7 + tmp16
tmp20 = tmp2 - tmp19
tmp21 = triton_helpers.maximum(tmp4, tmp20)
tmp22 = tmp21 * tmp21
tmp23 = tmp18 * tmp22
tmp24 = tmp8 - tmp18
tmp25 = tmp24 * tmp10
tmp26 = tmp19 - tmp12
tmp27 = triton_helpers.maximum(tmp4, tmp26)
tmp28 = tmp27 * tmp27
tmp29 = tmp25 * tmp28
tmp30 = tmp23 + tmp29
tmp31 = tmp17 + tmp30
tmp34 = tmp2 - tmp33
tmp35 = triton_helpers.maximum(tmp4, tmp34)
tmp36 = tmp35 * tmp35
tmp37 = tmp32 * tmp36
tmp38 = tmp8 - tmp32
tmp39 = tmp38 * tmp10
tmp40 = tmp33 - tmp12
tmp41 = triton_helpers.maximum(tmp4, tmp40)
tmp42 = tmp41 * tmp41
tmp43 = tmp39 * tmp42
tmp44 = tmp37 + tmp43
tmp45 = tmp31 + tmp44
tmp48 = tmp2 - tmp47
tmp49 = triton_helpers.maximum(tmp4, tmp48)
tmp50 = tmp49 * tmp49
tmp51 = tmp46 * tmp50
tmp52 = tmp8 - tmp46
tmp53 = tmp52 * tmp10
tmp54 = tmp47 - tmp12
tmp55 = triton_helpers.maximum(tmp4, tmp54)
tmp56 = tmp55 * tmp55
tmp57 = tmp53 * tmp56
tmp58 = tmp51 + tmp57
tmp59 = tmp45 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = tl.sum(tmp60, 1)[:, None]
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp62, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkwv5coj2xdbodmufpdzzfbs244xblz2r4qke7ld3aivawzenny.py
# Topologically Sorted Source Nodes: [reconstruction_loss, mul_3, caps_loss], Original ATen: [aten.mse_loss, aten.mul, aten.add]
# Source node to ATen node mapping:
# caps_loss => add_1
# mul_3 => mul_3
# reconstruction_loss => pow_3, sub_3, sum_3
# Graph fragment:
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg2_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%pow_3,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.0005), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %mul_3), kwargs = {})
triton_per_fused_add_mse_loss_mul_1 = async_compile.triton('triton_per_fused_add_mse_loss_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mse_loss_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mse_loss_mul_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp7 = tl.load(in_out_ptr0 + (0))
tmp8 = tl.broadcast_to(tmp7, [1])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = 0.0005
tmp10 = tmp6 * tmp9
tmp11 = tmp8 + tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sub, relu, pow_1, mul, sub_1, mul_1, sub_2, relu_1, pow_2, mul_2, L_k, L_k_1, margin_loss], Original ATen: [aten.rsub, aten.relu, aten.pow, aten.mul, aten.sub, aten.add, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0.run(arg1_1, arg0_1, buf1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [reconstruction_loss, mul_3, caps_loss], Original ATen: [aten.mse_loss, aten.mul, aten.add]
triton_per_fused_add_mse_loss_mul_1.run(buf3, arg3_1, arg2_1, 1, 256, grid=grid(1), stream=stream0)
del arg2_1
del arg3_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class MarginLoss(nn.Module):
def __init__(self, size_average=False, loss_lambda=0.5):
"""
Margin loss for digit existence
Eq. (4): L_k = T_k * max(0, m+ - ||v_k||)^2 + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2
Args:
            size_average: whether the losses should be averaged (True) or summed (False) over observations for each minibatch.
loss_lambda: parameter for down-weighting the loss for missing digits
"""
super(MarginLoss, self).__init__()
self.size_average = size_average
self.m_plus = 0.9
self.m_minus = 0.1
self.loss_lambda = loss_lambda
def forward(self, inputs, labels):
L_k = labels * F.relu(self.m_plus - inputs) ** 2 + self.loss_lambda * (
1 - labels) * F.relu(inputs - self.m_minus) ** 2
L_k = L_k.sum(dim=1)
if self.size_average:
return L_k.mean()
else:
return L_k.sum()
class CapsuleLoss(nn.Module):
def __init__(self, loss_lambda=0.5, recon_loss_scale=0.0005,
size_average=False):
"""
        Combined margin loss and reconstruction loss; for the margin loss, see above.
Sum squared error (SSE) was used as a reconstruction loss.
Args:
            recon_loss_scale: param for scaling down the reconstruction loss
size_average: if True, reconstruction loss becomes MSE instead of SSE
"""
super(CapsuleLoss, self).__init__()
self.size_average = size_average
self.margin_loss = MarginLoss(size_average=size_average,
loss_lambda=loss_lambda)
self.reconstruction_loss = nn.MSELoss(size_average=size_average)
self.recon_loss_scale = recon_loss_scale
def forward(self, inputs, labels, images, reconstructions):
margin_loss = self.margin_loss(inputs, labels)
reconstruction_loss = self.reconstruction_loss(reconstructions, images)
caps_loss = margin_loss + self.recon_loss_scale * reconstruction_loss
return caps_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0(in_ptr0, in_ptr1,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
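    # Margin loss, unrolled over the 4 class slices:
    # labels * relu(0.9 - x)^2 + 0.5 * (1 - labels) * relu(x - 0.1)^2,
    # reduced to a single scalar sum.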
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp18 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp19 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp32 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp33 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp46 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp47 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = 0.9
tmp3 = tmp2 - tmp1
tmp4 = tl.full([1, 1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = tmp5 * tmp5
tmp7 = tmp0 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp0
tmp10 = 0.5
tmp11 = tmp9 * tmp10
tmp12 = 0.1
tmp13 = tmp1 - tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp13)
tmp15 = tmp14 * tmp14
tmp16 = tmp11 * tmp15
tmp17 = tmp7 + tmp16
tmp20 = tmp2 - tmp19
tmp21 = triton_helpers.maximum(tmp4, tmp20)
tmp22 = tmp21 * tmp21
tmp23 = tmp18 * tmp22
tmp24 = tmp8 - tmp18
tmp25 = tmp24 * tmp10
tmp26 = tmp19 - tmp12
tmp27 = triton_helpers.maximum(tmp4, tmp26)
tmp28 = tmp27 * tmp27
tmp29 = tmp25 * tmp28
tmp30 = tmp23 + tmp29
tmp31 = tmp17 + tmp30
tmp34 = tmp2 - tmp33
tmp35 = triton_helpers.maximum(tmp4, tmp34)
tmp36 = tmp35 * tmp35
tmp37 = tmp32 * tmp36
tmp38 = tmp8 - tmp32
tmp39 = tmp38 * tmp10
tmp40 = tmp33 - tmp12
tmp41 = triton_helpers.maximum(tmp4, tmp40)
tmp42 = tmp41 * tmp41
tmp43 = tmp39 * tmp42
tmp44 = tmp37 + tmp43
tmp45 = tmp31 + tmp44
tmp48 = tmp2 - tmp47
tmp49 = triton_helpers.maximum(tmp4, tmp48)
tmp50 = tmp49 * tmp49
tmp51 = tmp46 * tmp50
tmp52 = tmp8 - tmp46
tmp53 = tmp52 * tmp10
tmp54 = tmp47 - tmp12
tmp55 = triton_helpers.maximum(tmp4, tmp54)
tmp56 = tmp55 * tmp55
tmp57 = tmp53 * tmp56
tmp58 = tmp51 + tmp57
tmp59 = tmp45 + tmp58
tmp60 = tl.broadcast_to(tmp59, [XBLOCK, RBLOCK])
tmp62 = tl.sum(tmp60, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp62, None)
@triton.jit
def triton_per_fused_add_mse_loss_mul_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
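    # Reconstruction term: sum of squared errors between reconstructions and
    # images, scaled by 0.0005 and accumulated in place onto the margin-loss
    # scalar.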
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp7 = tl.load(in_out_ptr0 + 0)
tmp8 = tl.broadcast_to(tmp7, [1])
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp9 = 0.0005
tmp10 = tmp6 * tmp9
tmp11 = tmp8 + tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
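    # Launches the margin-loss reduction, then folds the scaled SSE
    # reconstruction term into the same scalar buffer.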
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_pow_relu_rsub_sub_sum_0[grid(1)](arg1_1,
arg0_1, buf1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = buf1
del buf1
triton_per_fused_add_mse_loss_mul_1[grid(1)](buf3, arg3_1, arg2_1,
1, 256, num_warps=2, num_stages=1)
del arg2_1
del arg3_1
return buf3,
class MarginLoss(nn.Module):
def __init__(self, size_average=False, loss_lambda=0.5):
"""
Margin loss for digit existence
Eq. (4): L_k = T_k * max(0, m+ - ||v_k||)^2 + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2
Args:
            size_average: whether the losses should be averaged (True) or summed (False) over observations for each minibatch.
loss_lambda: parameter for down-weighting the loss for missing digits
"""
super(MarginLoss, self).__init__()
self.size_average = size_average
self.m_plus = 0.9
self.m_minus = 0.1
self.loss_lambda = loss_lambda
def forward(self, inputs, labels):
L_k = labels * F.relu(self.m_plus - inputs) ** 2 + self.loss_lambda * (
1 - labels) * F.relu(inputs - self.m_minus) ** 2
L_k = L_k.sum(dim=1)
if self.size_average:
return L_k.mean()
else:
return L_k.sum()
class CapsuleLossNew(nn.Module):
def __init__(self, loss_lambda=0.5, recon_loss_scale=0.0005,
size_average=False):
"""
        Combined margin loss and reconstruction loss; for the margin loss, see above.
Sum squared error (SSE) was used as a reconstruction loss.
Args:
            recon_loss_scale: param for scaling down the reconstruction loss
size_average: if True, reconstruction loss becomes MSE instead of SSE
"""
super(CapsuleLossNew, self).__init__()
self.size_average = size_average
self.margin_loss = MarginLoss(size_average=size_average,
loss_lambda=loss_lambda)
self.reconstruction_loss = nn.MSELoss(size_average=size_average)
self.recon_loss_scale = recon_loss_scale
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| richardsun-voyager/capsule-network | CapsuleLoss | false | 7,554 | [
"MIT"
] | 1 | 349cec1caa9ab95ff4b3333c33d04b1bdb442f67 | https://github.com/richardsun-voyager/capsule-network/tree/349cec1caa9ab95ff4b3333c33d04b1bdb442f67 | import torch
import torch.nn as nn
import torch.nn.functional as F
class MarginLoss(nn.Module):
def __init__(self, size_average=False, loss_lambda=0.5):
"""
Margin loss for digit existence
Eq. (4): L_k = T_k * max(0, m+ - ||v_k||)^2 + lambda * (1 - T_k) * max(0, ||v_k|| - m-)^2
Args:
            size_average: whether the losses should be averaged (True) or summed (False) over observations for each minibatch.
loss_lambda: parameter for down-weighting the loss for missing digits
"""
super().__init__()
self.size_average = size_average
self.m_plus = 0.9
self.m_minus = 0.1
self.loss_lambda = loss_lambda
def forward(self, inputs, labels):
L_k = labels * F.relu(self.m_plus - inputs) ** 2 + self.loss_lambda * (
1 - labels) * F.relu(inputs - self.m_minus) ** 2
L_k = L_k.sum(dim=1)
if self.size_average:
return L_k.mean()
else:
return L_k.sum()
class Model(nn.Module):
def __init__(self, loss_lambda=0.5, recon_loss_scale=0.0005,
size_average=False):
"""
        Combined margin loss and reconstruction loss; for the margin loss, see above.
Sum squared error (SSE) was used as a reconstruction loss.
Args:
            recon_loss_scale: param for scaling down the reconstruction loss
size_average: if True, reconstruction loss becomes MSE instead of SSE
"""
super().__init__()
self.size_average = size_average
self.margin_loss = MarginLoss(size_average=size_average,
loss_lambda=loss_lambda)
self.reconstruction_loss = nn.MSELoss(size_average=size_average)
self.recon_loss_scale = recon_loss_scale
def forward(self, inputs, labels, images, reconstructions):
margin_loss = self.margin_loss(inputs, labels)
reconstruction_loss = self.reconstruction_loss(reconstructions, images)
caps_loss = margin_loss + self.recon_loss_scale * reconstruction_loss
return caps_loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GAT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/i4/ci4j7o62hjlvxysby5leuec4f5mnobz3p5wi5zmgnb6pfgczycms.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view_2, %repeat_1], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*((((4*x1) + x0) // 16) % 4)) + (16*((((4*x1) + (64*x2) + x0) // 64) % 4)) + ((((4*x1) + x0) % 16) % 4)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr0 + ((4*(x1 % 4)) + (16*x2) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fy/cfyhpfvlh7v2kamyddf44ycfki2eygiwxnllf3xlbccy7vzxtcnc.py
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# e => gt
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%squeeze, 0), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nt/cntfzp3df5gosg7lgle2luub35j2ri2v6jggxvvfq3ayv6thp7nn.py
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_2, attention_3, e_2, attention_4, attention_5, e_3, attention_6, attention_7], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => amax, exp, sub, sum_1
# attention_2 => where_4
# attention_3 => amax_1, exp_1, sub_1, sum_2
# attention_4 => where_7
# attention_5 => amax_2, exp_2, sub_2, sum_3
# attention_6 => where_10
# attention_7 => amax_3, exp_3, sub_3, sum_4
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [2], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_1, [2], True), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [2], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_2, [2], True), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_10, [2], True), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_3, [2], True), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_2 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 36, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp41 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp46 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp52 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp58 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp74 = tl.load(in_ptr5 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp75 = tl.load(in_ptr6 + (4*x0), xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp80 = tl.load(in_ptr6 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp85 = tl.load(in_ptr5 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp86 = tl.load(in_ptr6 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp91 = tl.load(in_ptr5 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp92 = tl.load(in_ptr6 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp108 = tl.load(in_ptr7 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp109 = tl.load(in_ptr8 + (4*x0), xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp114 = tl.load(in_ptr8 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp119 = tl.load(in_ptr7 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp120 = tl.load(in_ptr8 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp125 = tl.load(in_ptr7 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp126 = tl.load(in_ptr8 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp39, xmask)
tl.store(out_ptr2 + (x0), tmp62, xmask)
tl.store(out_ptr3 + (x0), tmp73, xmask)
tl.store(out_ptr4 + (x0), tmp96, xmask)
tl.store(out_ptr5 + (x0), tmp107, xmask)
tl.store(out_ptr6 + (x0), tmp130, xmask)
tl.store(out_ptr7 + (x0), tmp141, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/av/cavxuzwlrtslrhduxjlzxdhyxrxry2pezmj4lh627cairqar5xms.py
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_2, attention_3, e_2, attention_4, attention_5, e_3, attention_6, attention_7], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => div, exp, sub
# attention_2 => where_4
# attention_3 => div_1, exp_1, sub_1
# attention_4 => where_7
# attention_5 => div_2, exp_2, sub_2
# attention_6 => where_10
# attention_7 => div_3, exp_3, sub_3
# e => mul, where
# e_1 => mul_5, where_3
# e_2 => mul_10, where_6
# e_3 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %squeeze, %mul), kwargs = {})
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %squeeze_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_1, %sum_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %squeeze_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_2, %sum_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %squeeze_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_3, %sum_4), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*i1', 9: '*fp32', 10: '*fp32', 11: '*i1', 12: '*fp32', 13: '*fp32', 14: '*i1', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_3', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + (x2), xmask).to(tl.int1)
tmp14 = tl.load(in_out_ptr1 + (x2), xmask)
tmp18 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr7 + (x2), xmask).to(tl.int1)
tmp24 = tl.load(in_out_ptr2 + (x2), xmask)
tmp28 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr10 + (x2), xmask).to(tl.int1)
tmp34 = tl.load(in_out_ptr3 + (x2), xmask)
tmp38 = tl.load(in_ptr11 + (x1), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr12 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(in_out_ptr0 + (x2), tmp12, xmask)
tl.store(in_out_ptr1 + (x2), tmp22, xmask)
tl.store(in_out_ptr2 + (x2), tmp32, xmask)
tl.store(in_out_ptr3 + (x2), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ga/cgambdox4jgw6nla3qyjekvcpje3ddyszyfludu47zysoazeme6p.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat_4
# Graph fragment:
# %cat_4 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 2), kwargs = {})
triton_poi_fused_cat_4 = async_compile.triton('triton_poi_fused_cat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tmp40 = tl.full([1], 16, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + (x2), tmp52, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mk/cmkxbav3dhefi2y5nylpf2wielp36aeuwte7nr7di76nqd6pk72m.py
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_8, attention_9], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention_8 => where_13
# attention_9 => amax_4, exp_4, sub_4, sum_5
# e_4 => mul_20, where_12
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, 4), kwargs = {})
# %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %squeeze_4, %mul_20), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_12, %full_default), kwargs = {})
# %amax_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_13, [2], True), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {})
# %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_4, [2], True), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_5 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + (x0), tmp28, xmask)
tl.store(out_ptr1 + (x0), tmp39, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5w/c5wclmk4wec5idrxpja6xw2xfmte3755tdz46fqxkd5oa7tddjji.py
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_8, attention_9], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention_8 => where_13
# attention_9 => div_4, exp_4, sub_4
# e_4 => mul_20, where_12
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze_4, 4), kwargs = {})
# %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %squeeze_4, %mul_20), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_12, %full_default), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {})
# %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_4, %sum_5), kwargs = {})
triton_poi_fused__softmax_leaky_relu_mul_where_6 = async_compile.triton('triton_poi_fused__softmax_leaky_relu_mul_where_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*i1', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_leaky_relu_mul_where_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(in_out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vb/cvbr7j3oazjy77ftp2ea563wjuj5ki27jsi64ahnlb2tyajjq3vn.py
# Topologically Sorted Source Nodes: [x_3, add], Original ATen: [aten.elu, aten.add]
# Source node to ATen node mapping:
# add => add
# x_3 => expm1_4, gt_14, mul_22, mul_24, where_14
# Graph fragment:
# %gt_14 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%bmm_4, 0), kwargs = {})
# %mul_22 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm_4, 1.0), kwargs = {})
# %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_22,), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {})
# %where_14 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %mul_22, %mul_24), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %primals_1), kwargs = {})
triton_poi_fused_add_elu_7 = async_compile.triton('triton_poi_fused_add_elu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_elu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_elu_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tmp7 + tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, buf1, 512, grid=grid(512), stream=stream0)
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0), primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
triton_poi_fused_leaky_relu_1.run(primals_4, buf4, 64, grid=grid(64), stream=stream0)
del primals_4
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf9, buf10, 512, grid=grid(512), stream=stream0)
buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf10, (64, 8), (8, 1), 0), primals_6, out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf17, buf18, 512, grid=grid(512), stream=stream0)
buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf18, (64, 8), (8, 1), 0), primals_8, out=buf19)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf19, buf20, 64, grid=grid(64), stream=stream0)
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf25, buf26, 512, grid=grid(512), stream=stream0)
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf26, (64, 8), (8, 1), 0), primals_10, out=buf27)
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf27, buf28, 64, grid=grid(64), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf21 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf22 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf29 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf30 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_2, attention_3, e_2, attention_4, attention_5, e_3, attention_6, attention_7], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_2.run(buf4, buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5, buf6, buf13, buf14, buf21, buf22, buf29, buf30, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0); del buf2 # reuse
buf15 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0); del buf11 # reuse
buf23 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse
buf31 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [e, zero_vec, attention, attention_1, e_1, attention_2, attention_3, e_2, attention_4, attention_5, e_3, attention_6, attention_7], Original ATen: [aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_3.run(buf7, buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13, buf14, buf20, buf21, buf22, buf28, buf29, buf30, 64, grid=grid(64), stream=stream0)
del buf13
del buf14
del buf21
del buf22
del buf29
del buf30
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.bmm]
extern_kernels.bmm(buf7, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf8)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf15, reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0), out=buf16)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.bmm]
extern_kernels.bmm(buf23, reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0), out=buf24)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf31, reinterpret_tensor(buf25, (4, 4, 4), (16, 4, 1), 0), out=buf32)
buf33 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_4.run(buf8, buf16, buf24, buf32, buf33, 256, grid=grid(256), stream=stream0)
buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_4], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf33, (16, 16), (16, 1), 0), primals_11, out=buf34)
buf35 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_5], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf34, buf35, 512, grid=grid(512), stream=stream0)
buf36 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf35, (64, 8), (8, 1), 0), primals_12, out=buf36)
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_4], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf36, buf37, 64, grid=grid(64), stream=stream0)
buf38 = buf6; del buf6 # reuse
buf39 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_8, attention_9], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_5.run(buf4, buf37, buf36, buf38, buf39, 16, grid=grid(16), stream=stream0)
buf40 = reinterpret_tensor(buf36, (4, 4, 4), (16, 4, 1), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [zero_vec, e_4, attention_8, attention_9], Original ATen: [aten.mul, aten.leaky_relu, aten.where, aten._softmax]
triton_poi_fused__softmax_leaky_relu_mul_where_6.run(buf40, buf4, buf37, buf38, buf39, 64, grid=grid(64), stream=stream0)
del buf38
del buf39
buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_prime_4], Original ATen: [aten.bmm]
extern_kernels.bmm(buf40, reinterpret_tensor(buf34, (4, 4, 4), (16, 4, 1), 0), out=buf41)
buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, add], Original ATen: [aten.elu, aten.add]
triton_poi_fused_add_elu_7.run(buf41, primals_1, buf42, 64, grid=grid(64), stream=stream0)
return (buf42, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, reinterpret_tensor(buf34, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf35, (8, 64), (1, 8), 0), reinterpret_tensor(primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf33, (16, 16), (1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(buf25, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf26, (8, 64), (1, 8), 0), reinterpret_tensor(primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), reinterpret_tensor(buf17, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf18, (8, 64), (1, 8), 0), reinterpret_tensor(primals_8, (1, 8), (1, 1), 0), reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (8, 64), (1, 8), 0), reinterpret_tensor(primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0), reinterpret_tensor(primals_3, (1, 8), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(query, key, value, mask=None, dropout=None, return_scores=False):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
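        # note: dropout is applied to the raw scores as well, so
        # `return_scores` yields dropped-out scores during training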
scores = dropout(scores)
if return_scores:
return torch.matmul(p_attn, value), p_attn, scores
else:
return torch.matmul(p_attn, value), p_attn
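# Shape sketch (hypothetical tensors): with query/key/value of shape
# (batch, heads, seq, d_k), the returned context is (batch, heads, seq, d_k)
# and the attention weights are (batch, heads, seq, seq):
#   q = k = v = torch.rand(2, 4, 5, 8)
#   context, p_attn = attention(q, k, v)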
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
B, N = h.size()[0], h.size()[1]
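        # Build all N*N pairwise concatenations [h_i || h_j], giving a
        # (B, N, N, 2*out_features) tensor that is scored against `a` below.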
a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat
(1, N, 1)], dim=2).view(B, N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
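        # Large negative fill so that masked (adj <= 0) entries vanish under
        # the softmax.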
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=2)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
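# A minimal sketch of a single layer call (hypothetical values; alpha is the
# LeakyReLU negative slope):
#   layer = GraphAttentionLayer(in_features=4, out_features=4, dropout=0.5, alpha=0.2)
#   x = torch.rand(2, 5, 4)      # (batch, nodes, in_features)
#   adj = torch.ones(2, 5, 5)    # dense adjacency; zeros mark missing edges
#   out = layer(x, adj)          # (2, 5, 4), ELU-activated since concat=True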
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, nlayers=2):
"""Dense version of GAT."""
super(GAT, self).__init__()
self.dropout = dropout
self.nlayers = nlayers
self.nheads = nheads
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
if self.nlayers > 2:
for i in range(self.nlayers - 2):
for j in range(self.nheads):
self.add_module('attention_{}_{}'.format(i + 1, j),
GraphAttentionLayer(nhid * nheads, nhid, dropout=
dropout, alpha=alpha, concat=True))
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
input = x
x = torch.cat([att(x, adj) for att in self.attentions], dim=2)
if self.nlayers > 2:
for i in range(self.nlayers - 2):
temp = []
x = F.dropout(x, self.dropout, training=self.training)
cur_input = x
for j in range(self.nheads):
temp.append(self.__getattr__('attention_{}_{}'.format(i +
1, j))(x, adj))
x = torch.cat(temp, dim=2) + cur_input
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return x + input
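# Usage sketch (values mirror get_init_inputs/get_inputs below):
#   model = GAT(nfeat=4, nhid=4, nclass=4, dropout=0.5, alpha=4, nheads=4)
#   out = model(torch.rand(4, 4, 4), torch.rand(4, 4, 4))  # -> (4, 4, 4)
# Note the final residual `x + input`: it requires nclass == nfeat.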
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5,
'alpha': 4, 'nheads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * ((4 * x1 + x0) // 16 % 4) + 16 * ((4 * x1 +
64 * x2 + x0) // 64 % 4) + (4 * x1 + x0) % 16 % 4), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr0 + (4 * (x1 % 4) + 16 * x2 + (-4 + x0)), tmp6 &
xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
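# Note: the kernel above materializes only the boolean mask (x > 0); later
# kernels reconstruct LeakyReLU as where(mask, x, alpha * x) with alpha = 4.0
# (the `alpha` passed to nn.LeakyReLU in get_init_inputs).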
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp40 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp41 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp51 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp52 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp57 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp58 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp74 = tl.load(in_ptr5 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp75 = tl.load(in_ptr6 + 4 * x0, xmask, eviction_policy='evict_last')
tmp79 = tl.load(in_ptr5 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp80 = tl.load(in_ptr6 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp85 = tl.load(in_ptr5 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp86 = tl.load(in_ptr6 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp91 = tl.load(in_ptr5 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp92 = tl.load(in_ptr6 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp108 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp109 = tl.load(in_ptr8 + 4 * x0, xmask, eviction_policy='evict_last')
tmp113 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp114 = tl.load(in_ptr8 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp119 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp120 = tl.load(in_ptr8 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp125 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp126 = tl.load(in_ptr8 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tmp42 = tmp41 * tmp3
tmp43 = tl.where(tmp40, tmp41, tmp42)
tmp44 = tl.where(tmp0, tmp43, tmp6)
tmp47 = tmp46 * tmp3
tmp48 = tl.where(tmp45, tmp46, tmp47)
tmp49 = tl.where(tmp8, tmp48, tmp6)
tmp50 = triton_helpers.maximum(tmp44, tmp49)
tmp53 = tmp52 * tmp3
tmp54 = tl.where(tmp51, tmp52, tmp53)
tmp55 = tl.where(tmp15, tmp54, tmp6)
tmp56 = triton_helpers.maximum(tmp50, tmp55)
tmp59 = tmp58 * tmp3
tmp60 = tl.where(tmp57, tmp58, tmp59)
tmp61 = tl.where(tmp22, tmp60, tmp6)
tmp62 = triton_helpers.maximum(tmp56, tmp61)
tmp63 = tmp44 - tmp62
tmp64 = tl_math.exp(tmp63)
tmp65 = tmp49 - tmp62
tmp66 = tl_math.exp(tmp65)
tmp67 = tmp64 + tmp66
tmp68 = tmp55 - tmp62
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp67 + tmp69
tmp71 = tmp61 - tmp62
tmp72 = tl_math.exp(tmp71)
tmp73 = tmp70 + tmp72
tmp76 = tmp75 * tmp3
tmp77 = tl.where(tmp74, tmp75, tmp76)
tmp78 = tl.where(tmp0, tmp77, tmp6)
tmp81 = tmp80 * tmp3
tmp82 = tl.where(tmp79, tmp80, tmp81)
tmp83 = tl.where(tmp8, tmp82, tmp6)
tmp84 = triton_helpers.maximum(tmp78, tmp83)
tmp87 = tmp86 * tmp3
tmp88 = tl.where(tmp85, tmp86, tmp87)
tmp89 = tl.where(tmp15, tmp88, tmp6)
tmp90 = triton_helpers.maximum(tmp84, tmp89)
tmp93 = tmp92 * tmp3
tmp94 = tl.where(tmp91, tmp92, tmp93)
tmp95 = tl.where(tmp22, tmp94, tmp6)
tmp96 = triton_helpers.maximum(tmp90, tmp95)
tmp97 = tmp78 - tmp96
tmp98 = tl_math.exp(tmp97)
tmp99 = tmp83 - tmp96
tmp100 = tl_math.exp(tmp99)
tmp101 = tmp98 + tmp100
tmp102 = tmp89 - tmp96
tmp103 = tl_math.exp(tmp102)
tmp104 = tmp101 + tmp103
tmp105 = tmp95 - tmp96
tmp106 = tl_math.exp(tmp105)
tmp107 = tmp104 + tmp106
tmp110 = tmp109 * tmp3
tmp111 = tl.where(tmp108, tmp109, tmp110)
tmp112 = tl.where(tmp0, tmp111, tmp6)
tmp115 = tmp114 * tmp3
tmp116 = tl.where(tmp113, tmp114, tmp115)
tmp117 = tl.where(tmp8, tmp116, tmp6)
tmp118 = triton_helpers.maximum(tmp112, tmp117)
tmp121 = tmp120 * tmp3
tmp122 = tl.where(tmp119, tmp120, tmp121)
tmp123 = tl.where(tmp15, tmp122, tmp6)
tmp124 = triton_helpers.maximum(tmp118, tmp123)
tmp127 = tmp126 * tmp3
tmp128 = tl.where(tmp125, tmp126, tmp127)
tmp129 = tl.where(tmp22, tmp128, tmp6)
tmp130 = triton_helpers.maximum(tmp124, tmp129)
tmp131 = tmp112 - tmp130
tmp132 = tl_math.exp(tmp131)
tmp133 = tmp117 - tmp130
tmp134 = tl_math.exp(tmp133)
tmp135 = tmp132 + tmp134
tmp136 = tmp123 - tmp130
tmp137 = tl_math.exp(tmp136)
tmp138 = tmp135 + tmp137
tmp139 = tmp129 - tmp130
tmp140 = tl_math.exp(tmp139)
tmp141 = tmp138 + tmp140
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
tl.store(out_ptr2 + x0, tmp62, xmask)
tl.store(out_ptr3 + x0, tmp73, xmask)
tl.store(out_ptr4 + x0, tmp96, xmask)
tl.store(out_ptr5 + x0, tmp107, xmask)
tl.store(out_ptr6 + x0, tmp130, xmask)
tl.store(out_ptr7 + x0, tmp141, xmask)
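# The kernel above performs the reduction half of a numerically stable softmax
# for all four attention heads at once: per 4-wide row it stores
# max_j(v_j) and sum_j exp(v_j - max), where v = where(adj > 0, leaky_relu(e), -9e15).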
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_3(in_out_ptr0,
in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10,
in_ptr11, in_ptr12, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr4 + x2, xmask).to(tl.int1)
tmp14 = tl.load(in_out_ptr1 + x2, xmask)
tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr7 + x2, xmask).to(tl.int1)
tmp24 = tl.load(in_out_ptr2 + x2, xmask)
tmp28 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr10 + x2, xmask).to(tl.int1)
tmp34 = tl.load(in_out_ptr3 + x2, xmask)
tmp38 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tmp15 = tmp14 * tmp3
tmp16 = tl.where(tmp13, tmp14, tmp15)
tmp17 = tl.where(tmp0, tmp16, tmp6)
tmp19 = tmp17 - tmp18
tmp20 = tl_math.exp(tmp19)
tmp22 = tmp20 / tmp21
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp0, tmp26, tmp6)
tmp29 = tmp27 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp32 = tmp30 / tmp31
tmp35 = tmp34 * tmp3
tmp36 = tl.where(tmp33, tmp34, tmp35)
tmp37 = tl.where(tmp0, tmp36, tmp6)
tmp39 = tmp37 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp42 = tmp40 / tmp41
tl.store(in_out_ptr0 + x2, tmp12, xmask)
tl.store(in_out_ptr1 + x2, tmp22, xmask)
tl.store(in_out_ptr2 + x2, tmp32, xmask)
tl.store(in_out_ptr3 + x2, tmp42, xmask)
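# Normalization half of the softmax: each head's scores are rewritten in place
# as exp(v - rowmax) / rowsum, using the statistics produced by the kernel above.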
@triton.jit
def triton_poi_fused_cat_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
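# This kernel fuses F.elu (computed inline as where(x > 0, x, expm1(x))) with
# the concatenation of the four heads' outputs along the 16-wide feature dim.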
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_5(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp23 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp11 = tmp10 * tmp3
tmp12 = tl.where(tmp9, tmp10, tmp11)
tmp13 = tl.where(tmp8, tmp12, tmp6)
tmp14 = triton_helpers.maximum(tmp7, tmp13)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp16, tmp17, tmp18)
tmp20 = tl.where(tmp15, tmp19, tmp6)
tmp21 = triton_helpers.maximum(tmp14, tmp20)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp23, tmp24, tmp25)
tmp27 = tl.where(tmp22, tmp26, tmp6)
tmp28 = triton_helpers.maximum(tmp21, tmp27)
tmp29 = tmp7 - tmp28
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp13 - tmp28
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp30 + tmp32
tmp34 = tmp20 - tmp28
tmp35 = tl_math.exp(tmp34)
tmp36 = tmp33 + tmp35
tmp37 = tmp27 - tmp28
tmp38 = tl_math.exp(tmp37)
tmp39 = tmp36 + tmp38
tl.store(out_ptr0 + x0, tmp28, xmask)
tl.store(out_ptr1 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused__softmax_leaky_relu_mul_where_6(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = 4.0
tmp4 = tmp2 * tmp3
tmp5 = tl.where(tmp1, tmp2, tmp4)
tmp6 = -8999999815811072.0
tmp7 = tl.where(tmp0, tmp5, tmp6)
tmp9 = tmp7 - tmp8
tmp10 = tl_math.exp(tmp9)
tmp12 = tmp10 / tmp11
tl.store(in_out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_elu_7(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tmp7 + tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
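# Fused epilogue: elu(out_att(...)) + input, i.e. the residual connection at
# the end of GAT.forward, with elu(x) = x if x > 0 else expm1(x).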
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, buf1, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 8), (8, 1), 0),
primals_3, out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](primals_4, buf4, 64, XBLOCK
=64, num_warps=1, num_stages=1)
del primals_4
buf9 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf9, buf10, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf10, (64, 8), (8, 1), 0),
primals_6, out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf17, buf18, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (64, 8), (8, 1), 0),
primals_8, out=buf19)
buf20 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf19, buf20, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf25, buf26, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf27 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf26, (64, 8), (8, 1), 0),
primals_10, out=buf27)
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf27, buf28, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf13 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf21 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf22 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf29 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf30 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_leaky_relu_mul_where_2[grid(16)](buf4,
buf3, buf2, buf12, buf11, buf20, buf19, buf28, buf27, buf5,
buf6, buf13, buf14, buf21, buf22, buf29, buf30, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0)
del buf2
buf15 = reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0)
del buf11
buf23 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
buf31 = reinterpret_tensor(buf27, (4, 4, 4), (16, 4, 1), 0)
del buf27
triton_poi_fused__softmax_leaky_relu_mul_where_3[grid(64)](buf7,
buf15, buf23, buf31, buf4, buf3, buf5, buf6, buf12, buf13,
buf14, buf20, buf21, buf22, buf28, buf29, buf30, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf13
del buf14
del buf21
del buf22
del buf29
del buf30
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf7, reinterpret_tensor(buf0, (4, 4, 4), (16, 4,
1), 0), out=buf8)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf15, reinterpret_tensor(buf9, (4, 4, 4), (16,
4, 1), 0), out=buf16)
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf23, reinterpret_tensor(buf17, (4, 4, 4), (16,
4, 1), 0), out=buf24)
buf32 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf31, reinterpret_tensor(buf25, (4, 4, 4), (16,
4, 1), 0), out=buf32)
buf33 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_cat_4[grid(256)](buf8, buf16, buf24, buf32, buf33,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf34 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf33, (16, 16), (16, 1), 0),
primals_11, out=buf34)
buf35 = empty_strided_cuda((4, 16, 8), (128, 8, 1), torch.float32)
triton_poi_fused_cat_0[grid(512)](buf34, buf35, 512, XBLOCK=128,
num_warps=4, num_stages=1)
buf36 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf35, (64, 8), (8, 1), 0),
primals_12, out=buf36)
buf37 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_1[grid(64)](buf36, buf37, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf38 = buf6
del buf6
buf39 = buf5
del buf5
triton_poi_fused__softmax_leaky_relu_mul_where_5[grid(16)](buf4,
buf37, buf36, buf38, buf39, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf40 = reinterpret_tensor(buf36, (4, 4, 4), (16, 4, 1), 0)
del buf36
triton_poi_fused__softmax_leaky_relu_mul_where_6[grid(64)](buf40,
buf4, buf37, buf38, buf39, 64, XBLOCK=64, num_warps=1, num_stages=1
)
del buf38
del buf39
buf41 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf40, reinterpret_tensor(buf34, (4, 4, 4), (16,
4, 1), 0), out=buf41)
buf42 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_elu_7[grid(64)](buf41, primals_1, buf42, 64,
XBLOCK=64, num_warps=1, num_stages=1)
return (buf42, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20,
buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41,
reinterpret_tensor(buf34, (4, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf35, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_12, (1, 8), (1, 1), 0), reinterpret_tensor(buf33, (16, 16),
(1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0),
reinterpret_tensor(buf25, (4, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf26, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_10, (1, 8), (1, 1), 0), reinterpret_tensor(primals_1, (4,
16), (1, 4), 0), reinterpret_tensor(buf17, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf18, (8, 64), (1, 8), 0),
reinterpret_tensor(primals_8, (1, 8), (1, 1), 0),
reinterpret_tensor(buf9, (4, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf10, (8, 64), (1, 8), 0), reinterpret_tensor(
primals_6, (1, 8), (1, 1), 0), reinterpret_tensor(buf0, (4, 4, 4),
(16, 1, 4), 0), reinterpret_tensor(buf1, (8, 64), (1, 8), 0),
reinterpret_tensor(primals_3, (1, 8), (1, 1), 0))
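# The first returned tensor (buf42) is the module output; the remaining
# tensors are intermediates kept for the backward pass of the AOT graph.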
def attention(query, key, value, mask=None, dropout=None, return_scores=False):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
scores = dropout(scores)
if return_scores:
return torch.matmul(p_attn, value), p_attn, scores
else:
return torch.matmul(p_attn, value), p_attn
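# Illustrative check (hypothetical helper, not part of the original module):
# with q = k = v and no mask, the attention rows sum to 1 and the output keeps
# the shape of `value`. Note that when a dropout module is passed, it is
# applied to both p_attn and the returned raw scores.
def _attention_shapes_demo(B=2, H=4, T=8, d_k=16):
    q = torch.rand(B, H, T, d_k)
    out, p_attn = attention(q, q, q)
    assert out.shape == (B, H, T, d_k)
    assert torch.allclose(p_attn.sum(-1), torch.ones(B, H, T), atol=1e-5)
    return out, p_attn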
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
B, N = h.size()[0], h.size()[1]
a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat
(1, N, 1)], dim=2).view(B, N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=2)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, nlayers=2):
"""Dense version of GAT."""
super(GATNew, self).__init__()
self.dropout = dropout
self.nlayers = nlayers
self.nheads = nheads
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
if self.nlayers > 2:
for i in range(self.nlayers - 2):
for j in range(self.nheads):
self.add_module('attention_{}_{}'.format(i + 1, j),
GraphAttentionLayer(nhid * nheads, nhid, dropout=
dropout, alpha=alpha, concat=True))
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, input_0, input_1):
primals_2 = self.attention_0.W
primals_3 = self.attention_0.a
primals_5 = self.attention_1.W
primals_6 = self.attention_1.a
primals_7 = self.attention_2.W
primals_8 = self.attention_2.a
primals_9 = self.attention_3.W
primals_10 = self.attention_3.a
primals_11 = self.out_att.W
primals_12 = self.out_att.a
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| qinyan-li/DocEE | GAT | false | 7,555 | [
"MIT"
] | 1 | e8d2202a44907df5f12f9a67180d849a54421ab7 | https://github.com/qinyan-li/DocEE/tree/e8d2202a44907df5f12f9a67180d849a54421ab7 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def attention(query, key, value, mask=None, dropout=None, return_scores=False):
"""Compute 'Scaled Dot Product Attention'"""
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1000000000.0)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
scores = dropout(scores)
if return_scores:
return torch.matmul(p_attn, value), p_attn, scores
else:
return torch.matmul(p_attn, value), p_attn
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super().__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.matmul(input, self.W)
B, N = h.size()[0], h.size()[1]
a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat
(1, N, 1)], dim=2).view(B, N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=2)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class Model(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, nlayers=2):
"""Dense version of GAT."""
super().__init__()
self.dropout = dropout
self.nlayers = nlayers
self.nheads = nheads
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
if self.nlayers > 2:
for i in range(self.nlayers - 2):
for j in range(self.nheads):
self.add_module('attention_{}_{}'.format(i + 1, j),
GraphAttentionLayer(nhid * nheads, nhid, dropout=
dropout, alpha=alpha, concat=True))
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
input = x
x = torch.cat([att(x, adj) for att in self.attentions], dim=2)
if self.nlayers > 2:
for i in range(self.nlayers - 2):
temp = []
x = F.dropout(x, self.dropout, training=self.training)
cur_input = x
for j in range(self.nheads):
temp.append(self.__getattr__('attention_{}_{}'.format(i +
1, j))(x, adj))
x = torch.cat(temp, dim=2) + cur_input
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return x + input
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nh
# ... truncated (>4000 chars) for memory efficiency |
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k4/ck4ug2qsg67ysziasthxk6zcwqznbzrg2ft7ztrca5chplmx2o2r.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.celu]
# Source node to ATen node mapping:
# x => expm1, gt, where
# Graph fragment:
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%view_1,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %expm1), kwargs = {})
triton_poi_fused_celu_0 = async_compile.triton('triton_poi_fused_celu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_celu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_celu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = libdevice.expm1(tmp0)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5u/c5upjzsvmsap2aldcm5fmrqt3utut3iuuoh3ive6xkwmdlbozsml.py
# Topologically Sorted Source Nodes: [tanh, x_2, x_3], Original ATen: [aten.tanh, aten.mul, aten.add]
# Source node to ATen node mapping:
# tanh => tanh
# x_2 => mul
# x_3 => add
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where_1, %tanh), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
triton_poi_fused_add_mul_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6c/c6ckolqhuueczvrgbychyipdueiy2ybtgdpe3cfadeylovc2jpy7.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
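# The kernel above computes the LayerNorm statistics over the trailing dim of
# size 4: mu = sum(x) / 4 and rstd = rsqrt(var + 1e-05) with the biased
# (correction=0) variance, stored per row for the affine kernel below.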
# kernel path: runs/run_shard_4/inductor_cache/q2/cq23g6fox4vfwvfbgppwfe66qqc5ghghxuszflfuisxnvi33gw4h.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_4 => add_1, add_2, mul_1, mul_2, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_9), kwargs = {})
triton_poi_fused_native_layer_norm_3 = async_compile.triton('triton_poi_fused_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.celu]
stream0 = get_raw_stream(0)
triton_poi_fused_celu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.celu]
triton_poi_fused_celu_0.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tanh, x_2, x_3], Original ATen: [aten.tanh, aten.mul, aten.add]
triton_poi_fused_add_mul_tanh_1.run(primals_1, buf3, buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf5, buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_3.run(buf5, buf6, buf7, primals_8, primals_9, buf8, 256, grid=grid(256), stream=stream0)
del buf6
del buf7
del primals_9
return (buf8, primals_8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), buf2, buf3, buf4, buf5, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class LinearAndMultiply(nn.Module):
def __init__(self, input_size, output_size, use_multiply=True,
linear_block=nn.Linear):
super().__init__()
self._activation = nn.CELU()
self._linear = linear_block(input_size, output_size)
self._use_multiply = use_multiply
if self._use_multiply:
self._to_multiplier = linear_block(output_size, output_size)
def forward(self, x, *extra):
x = self._activation(self._linear(x, *extra))
if not self._use_multiply:
return x
return x * torch.tanh(self._to_multiplier(x, *extra))
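# Gating sketch: LinearAndMultiply computes celu(W1 x) and, when use_multiply
# is set, scales it elementwise by tanh(W2 celu(W1 x)), i.e. a learned
# multiplicative gate with values in (-1, 1).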
class ResBlock(nn.Module):
def __init__(self, input_size, output_size, use_multiply=True,
linear_block=nn.Linear, use_norm=True):
super().__init__()
self._linear_block = LinearAndMultiply(input_size, output_size,
use_multiply=False, linear_block=linear_block)
self._mul_block = LinearAndMultiply(output_size, output_size,
use_multiply=use_multiply, linear_block=linear_block)
self._use_norm = use_norm
if self._use_norm:
self._norm = nn.LayerNorm(output_size)
self._pad_size = output_size - input_size
assert self._pad_size >= 0
def forward(self, x, *extra):
padded_input = F.pad(x, (0, self._pad_size))
x = self._mul_block(self._linear_block(x, *extra), *extra)
x = padded_input + x
if self._use_norm:
x = self._norm(x)
return x
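# Minimal shape check (illustrative helper, not part of the original module):
# with input_size == output_size the pad is empty, so the residual path
# preserves the input shape end to end.
def _resblock_shape_demo():
    block = ResBlock(4, 4)
    x = torch.rand(4, 4, 4, 4)
    y = block(x)
    assert y.shape == x.shape
    return y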
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_celu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = libdevice.expm1(tmp0)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_mul_tanh_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
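# Affine half of LayerNorm: applies (x - mu) * rstd * weight + bias using the
# per-row statistics computed by the kernel above.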
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_celu_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_celu_0[grid(256)](buf2, buf3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_tanh_1[grid(256)](primals_1, buf3, buf4,
buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64)](buf5, buf6, buf7, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_3[grid(256)](buf5, buf6, buf7,
primals_8, primals_9, buf8, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf6
del buf7
del primals_9
return buf8, primals_8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), buf2, buf3, buf4, buf5, primals_6, primals_4
class LinearAndMultiply(nn.Module):
def __init__(self, input_size, output_size, use_multiply=True,
linear_block=nn.Linear):
super().__init__()
self._activation = nn.CELU()
self._linear = linear_block(input_size, output_size)
self._use_multiply = use_multiply
if self._use_multiply:
self._to_multiplier = linear_block(output_size, output_size)
def forward(self, x, *extra):
x = self._activation(self._linear(x, *extra))
if not self._use_multiply:
return x
return x * torch.tanh(self._to_multiplier(x, *extra))
class ResBlockNew(nn.Module):
def __init__(self, input_size, output_size, use_multiply=True,
linear_block=nn.Linear, use_norm=True):
super().__init__()
self._linear_block = LinearAndMultiply(input_size, output_size,
use_multiply=False, linear_block=linear_block)
self._mul_block = LinearAndMultiply(output_size, output_size,
use_multiply=use_multiply, linear_block=linear_block)
self._use_norm = use_norm
if self._use_norm:
self._norm = nn.LayerNorm(output_size)
self._pad_size = output_size - input_size
assert self._pad_size >= 0
def forward(self, input_0):
primals_2 = self._linear_block._linear.weight
primals_3 = self._linear_block._linear.bias
primals_4 = self._mul_block._linear.weight
primals_5 = self._mul_block._linear.bias
primals_6 = self._mul_block._to_multiplier.weight
primals_7 = self._mul_block._to_multiplier.bias
primals_8 = self._norm.weight
primals_9 = self._norm.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| rgreenblatt/path | ResBlock | false | 7,556 | [
"MIT"
] | 1 | 2057618ee3a6067c230c1c1c40856d2c9f5006b0 | https://github.com/rgreenblatt/path/tree/2057618ee3a6067c230c1c1c40856d2c9f5006b0 | import torch
from torch import nn
import torch.nn.functional as F
class LinearAndMultiply(nn.Module):
def __init__(self, input_size, output_size, use_multiply=True,
linear_block=nn.Linear):
super().__init__()
self._activation = nn.CELU()
self._linear = linear_block(input_size, output_size)
self._use_multiply = use_multiply
if self._use_multiply:
self._to_multiplier = linear_block(output_size, output_size)
def forward(self, x, *extra):
x = self._activation(self._linear(x, *extra))
if not self._use_multiply:
return x
return x * torch.tanh(self._to_multiplier(x, *extra))
class Model(nn.Module):
def __init__(self, input_size, output_size, use_multiply=True,
linear_block=nn.Linear, use_norm=True):
super().__init__()
self._linear_block = LinearAndMultiply(input_size, output_size,
use_multiply=False, linear_block=linear_block)
self._mul_block = LinearAndMultiply(output_size, output_size,
use_multiply=use_multiply, linear_block=linear_block)
self._use_norm = use_norm
if self._use_norm:
self._norm = nn.LayerNorm(output_size)
self._pad_size = output_size - input_size
assert self._pad_size >= 0
def forward(self, x, *extra):
padded_input = F.pad(x, (0, self._pad_size))
x = self._mul_block(self._linear_block(x, *extra), *extra)
x = padded_input + x
if self._use_norm:
x = self._norm(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
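# Hypothetical usage sketch (editorial addition): builds the block from
# get_init_inputs() and runs one eager forward pass on random data.
if __name__ == '__main__':
    block = Model(*get_init_inputs())
    out = block(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 4, 4])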
|
SAM | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/gk/cgkommcvngi74phzg5zy3lc7uws777uww4n5ihrudutssbv4626j.py
# Topologically Sorted Source Nodes: [spat_att], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# spat_att => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%avg_pool3d, %getitem], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2) + (64*x1)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2) + (64*x1)), tmp4 & xmask, other=0.0)
tmp7 = tmp6 + tmp5
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2) + (64*x1)), tmp4 & xmask, other=0.0)
tmp9 = tmp8 + tmp7
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2) + (64*x1)), tmp4 & xmask, other=0.0)
tmp11 = tmp10 + tmp9
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr1 + (x0 + (16*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp4, tmp15, tmp19)
tl.store(out_ptr0 + (x3), tmp20, xmask)
''', device_str='cuda')
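# Note (editorial): the kernel above fuses the channel-wise average pool (the
# four adds scaled by 0.25 in the x1 == 0 branch) with the concatenation of
# the separately computed max-pool result (the x1 == 1 branch reading
# in_ptr1). A rough eager equivalent, assuming an NCHW input x with 4 channels:
#     avg = x.mean(dim=1, keepdim=True)
#     mx = x.max(dim=1, keepdim=True).values
#     out = torch.cat([avg, mx], dim=1)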
# kernel path: runs/run_shard_4/inductor_cache/go/cgofqcgduqrtcjakfd7uk3wkcrpwsqxispluihwsstry6ekodk2u.py
# Topologically Sorted Source Nodes: [conv2d, spat_att_1], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d => convolution
# spat_att_1 => sigmoid
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [max_pool3d], Original ATen: [aten.max_pool3d_with_indices]
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [4, 1, 1], [4, 1, 1])
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [spat_att], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf1, buf3, 128, grid=grid(128), stream=stream0)
del buf1
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d, spat_att_1], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_1.run(buf5, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf5, primals_2, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SAM(nn.Module):
def __init__(self, channels_in):
super(SAM, self).__init__()
self.channels_in = channels_in
self.avg_pool = nn.AvgPool3d(kernel_size=(self.channels_in, 1, 1))
self.max_pool = nn.MaxPool3d(kernel_size=(self.channels_in, 1, 1))
self.conv1 = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7,
stride=1, padding=3)
def forward(self, x, save_attention=False):
spat_att = torch.cat(tensors=(self.avg_pool(x), self.max_pool(x)),
dim=1)
spat_att = torch.sigmoid(self.conv1(spat_att))
if save_attention:
torch.save(spat_att,
f'tmp/cbam-attention_spatial_{spat_att.shape[-2]}-{spat_att.shape[-1]}.pt'
)
return spat_att
def initialize_weights(self):
nn.init.normal_(self.conv1.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.conv1.bias.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
    tl.full([1], 0, tl.int64)  # dead expression left by codegen (result unused after pruning)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2 + 64 * x1), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2 + 64 * x1), tmp4 & xmask,
other=0.0)
tmp7 = tmp6 + tmp5
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2 + 64 * x1), tmp4 & xmask,
other=0.0)
tmp9 = tmp8 + tmp7
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2 + 64 * x1), tmp4 & xmask,
other=0.0)
tmp11 = tmp10 + tmp9
tmp12 = 0.25
tmp13 = tmp11 * tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
    tl.full([1], 2, tl.int64)  # dead expression left by codegen (result unused after pruning)
tmp19 = tl.load(in_ptr1 + (x0 + 16 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp4, tmp15, tmp19)
tl.store(out_ptr0 + x3, tmp20, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [4,
1, 1], [4, 1, 1])
buf1 = buf0[0]
del buf0
buf3 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf1, buf3, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_1
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 1, 4, 4), (16, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_sigmoid_1[grid(64)](buf5, primals_3,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf5, primals_2, buf3, buf5
class SAMNew(nn.Module):
def __init__(self, channels_in):
super(SAMNew, self).__init__()
self.channels_in = channels_in
self.avg_pool = nn.AvgPool3d(kernel_size=(self.channels_in, 1, 1))
self.max_pool = nn.MaxPool3d(kernel_size=(self.channels_in, 1, 1))
self.conv1 = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7,
stride=1, padding=3)
def initialize_weights(self):
nn.init.normal_(self.conv1.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.conv1.bias.data, 0.0)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
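# Editorial sanity-check sketch (assumes a CUDA device and an illustrative
# tolerance): the compiled call() path should reproduce the eager
# cat(avg_pool, max_pool) -> conv -> sigmoid pipeline built from the same
# submodules.
def _check_sam_new():
    torch.manual_seed(0)
    m = SAMNew(4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.sigmoid(m.conv1(torch.cat((m.avg_pool(x), m.max_pool(x)),
        dim=1)))
    assert torch.allclose(m(x), ref, atol=1e-5)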
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | SAM | false | 7,557 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, channels_in):
super().__init__()
self.channels_in = channels_in
self.avg_pool = nn.AvgPool3d(kernel_size=(self.channels_in, 1, 1))
self.max_pool = nn.MaxPool3d(kernel_size=(self.channels_in, 1, 1))
self.conv1 = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7,
stride=1, padding=3)
def forward(self, x, save_attention=False):
spat_att = torch.cat(tensors=(self.avg_pool(x), self.max_pool(x)),
dim=1)
spat_att = torch.sigmoid(self.conv1(spat_att))
if save_attention:
torch.save(spat_att,
f'tmp/cbam-attention_spatial_{spat_att.shape[-2]}-{spat_att.shape[-1]}.pt'
)
return spat_att
def initialize_weights(self):
nn.init.normal_(self.conv1.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.conv1.bias.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
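# Hypothetical usage sketch (editorial addition; applying the map is not part
# of this module): in a CBAM-style block the returned spatial attention map
# typically rescales the input by broadcasting over the channel axis.
if __name__ == '__main__':
    sam = Model(*get_init_inputs())
    x = get_inputs()[0]
    out = x * sam(x)  # (4, 1, 4, 4) map broadcast over 4 channels
    print(out.shape)  # torch.Size([4, 4, 4, 4])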
|
Affine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/we/cwegr75gc7slhvygkh4qgpti3y7cw7j23tllhdeulaje2nyjxbbr.py
# Topologically Sorted Source Nodes: [addcmul], Original ATen: [aten.addcmul]
# Source node to ATen node mapping:
# addcmul => add, mul, mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul_1), kwargs = {})
triton_poi_fused_addcmul_0 = async_compile.triton('triton_poi_fused_addcmul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_addcmul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_addcmul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
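# Note (editorial): the kernel above is the elementwise expansion of
# torch.addcmul(beta, alpha, x) with value=1: it computes
# beta + (alpha * 1.0) * x, broadcasting the (1, 1, dim) parameters over the
# last dimension of x via the `x0 = xindex % 4` indexing.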
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [addcmul], Original ATen: [aten.addcmul]
stream0 = get_raw_stream(0)
triton_poi_fused_addcmul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf0, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 1, 4), (4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Affine(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, x):
return torch.addcmul(self.beta, self.alpha, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_addcmul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp5 = tmp3 * tmp4
tmp6 = tmp0 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_2, (1, 1, 4), (4, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_addcmul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf0, primals_3
class AffineNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, input_0):
primals_1 = self.alpha
primals_2 = self.beta
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
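# Editorial sanity-check sketch (assumes a CUDA device): the compiled path is
# an elementwise expansion of torch.addcmul, so the two should agree closely.
def _check_affine_new():
    m = AffineNew(4).cuda()
    with torch.no_grad():
        m.alpha.mul_(2.0)  # perturb the defaults so the check is non-trivial
        m.beta.add_(0.5)
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), torch.addcmul(m.beta, m.alpha, x))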
| rioyokotalab/pytorch-image-models | Affine | false | 7,558 | [
"Apache-2.0"
] | 1 | 87d8d3c14b64bb6a76402f363a1e1ee1829bca93 | https://github.com/rioyokotalab/pytorch-image-models/tree/87d8d3c14b64bb6a76402f363a1e1ee1829bca93 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
class Model(nn.Module):
def __init__(self, dim):
super().__init__()
self.alpha = nn.Parameter(torch.ones((1, 1, dim)))
self.beta = nn.Parameter(torch.zeros((1, 1, dim)))
def forward(self, x):
return torch.addcmul(self.beta, self.alpha, x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
PositionalAttentionModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ko/ckow7ci7f3mygm6ujdzdisip6tet25h4hj6uestesqalhkarwrrw.py
# Topologically Sorted Source Nodes: [S], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# S => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + (16*x0)), tmp11, xmask)
''', device_str='cuda')
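# Note (editorial): the reduction kernel above is a row-wise numerically
# stable softmax, softmax(x)_i = exp(x_i - max(x)) / sum_j exp(x_j - max(x)),
# applied to rows of length 16 -- one row of the (H*W) x (H*W) attention
# matrix per program instance.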
# kernel path: runs/run_shard_4/inductor_cache/ei/ceid4re34lmuazxtcmv2cljn2v2emu56yzgps5z74exnddnidgi3.py
# Topologically Sorted Source Nodes: [mul, E], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# E => add
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_8, %view_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_1, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf4, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf5 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf5, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0), out=buf6)
buf9 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [S], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf6, buf9, 64, 16, grid=grid(64), stream=stream0)
del buf6
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf9, (4, 16, 16), (256, 1, 16), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, E], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_2.run(primals_8, buf10, primals_1, buf11, 256, grid=grid(256), stream=stream0)
return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8, buf9, buf10, reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0), reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalAttentionModule(nn.Module):
def __init__(self, in_channels):
super(PositionalAttentionModule, self).__init__()
self.in_channels = in_channels
self.conv_B = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.conv_C = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.conv_D = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.alpha = nn.Parameter(torch.zeros(1), requires_grad=True)
def forward(self, A):
batchsize, num_channels, height, width = A.shape
N = height * width
B = self.conv_B(A).view((batchsize, num_channels, N))
C = self.conv_C(A).view((batchsize, num_channels, N))
D = self.conv_D(A).view((batchsize, num_channels, N))
S = F.softmax(torch.bmm(C.permute(0, 2, 1), B), dim=-1)
DS = torch.bmm(D, S.permute(0, 2, 1)).view((batchsize, num_channels,
height, width))
E = self.alpha * DS + A
return E
def initialize_weights(self):
for layer in [self.conv_B, self.conv_C, self.conv_D]:
nn.init.normal_(layer.weight.data, mean=0.0, std=0.02)
nn.init.constant_(layer.bias.data, 0.0)
nn.init.constant_(self.alpha.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # unused all-true rmask left by codegen
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tmp6 / tmp10
tl.store(out_ptr2 + (r1 + 16 * x0), tmp11, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(primals_1, primals_4, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = extern_kernels.convolution(primals_1, primals_6, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_0[grid(256)](buf4, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = buf2
del buf2
triton_poi_fused_convolution_0[grid(256)](buf5, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 16, 4), (64, 1, 16),
0), reinterpret_tensor(buf1, (4, 4, 16), (64, 16, 1), 0), out=buf6)
buf9 = empty_strided_cuda((4, 16, 16), (256, 16, 1), torch.float32)
triton_per_fused__softmax_1[grid(64)](buf6, buf9, 64, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del buf6
buf10 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (4, 4, 16), (64, 16, 1),
0), reinterpret_tensor(buf9, (4, 16, 16), (256, 1, 16), 0), out
=buf10)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(256)](primals_8, buf10, primals_1,
buf11, 256, XBLOCK=128, num_warps=4, num_stages=1)
return (buf11, primals_1, primals_2, primals_4, primals_6, primals_8,
buf9, buf10, reinterpret_tensor(buf4, (4, 16, 4), (64, 1, 16), 0),
reinterpret_tensor(buf5, (4, 4, 16), (64, 16, 1), 0),
reinterpret_tensor(buf1, (4, 16, 4), (64, 1, 16), 0))
class PositionalAttentionModuleNew(nn.Module):
def __init__(self, in_channels):
super(PositionalAttentionModuleNew, self).__init__()
self.in_channels = in_channels
self.conv_B = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.conv_C = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.conv_D = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.alpha = nn.Parameter(torch.zeros(1), requires_grad=True)
def initialize_weights(self):
for layer in [self.conv_B, self.conv_C, self.conv_D]:
nn.init.normal_(layer.weight.data, mean=0.0, std=0.02)
nn.init.constant_(layer.bias.data, 0.0)
nn.init.constant_(self.alpha.data, 0.0)
def forward(self, input_0):
primals_8 = self.alpha
primals_2 = self.conv_B.weight
primals_3 = self.conv_B.bias
primals_4 = self.conv_C.weight
primals_5 = self.conv_C.bias
primals_6 = self.conv_D.weight
primals_7 = self.conv_D.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
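# Editorial sanity-check sketch (assumes a CUDA device and an illustrative
# tolerance): the compiled path should match the eager attention written out
# from the same 1x1 convolutions and the alpha parameter.
def _check_pam_new():
    torch.manual_seed(0)
    m = PositionalAttentionModuleNew(4).cuda()
    with torch.no_grad():
        m.alpha.fill_(0.5)  # non-zero so the attention branch contributes
    x = torch.rand(4, 4, 4, 4, device='cuda')
    b, c, h, w = x.shape
    B = m.conv_B(x).view(b, c, h * w)
    C = m.conv_C(x).view(b, c, h * w)
    D = m.conv_D(x).view(b, c, h * w)
    S = torch.softmax(torch.bmm(C.permute(0, 2, 1), B), dim=-1)
    ref = m.alpha * torch.bmm(D, S.permute(0, 2, 1)).view(b, c, h, w) + x
    assert torch.allclose(m(x), ref, atol=1e-5)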
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | PositionalAttentionModule | false | 7,559 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.conv_B = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.conv_C = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.conv_D = nn.Conv2d(in_channels=self.in_channels, out_channels=
self.in_channels, kernel_size=1, stride=1, padding=0)
self.alpha = nn.Parameter(torch.zeros(1), requires_grad=True)
def forward(self, A):
batchsize, num_channels, height, width = A.shape
N = height * width
B = self.conv_B(A).view((batchsize, num_channels, N))
C = self.conv_C(A).view((batchsize, num_channels, N))
D = self.conv_D(A).view((batchsize, num_channels, N))
S = F.softmax(torch.bmm(C.permute(0, 2, 1), B), dim=-1)
DS = torch.bmm(D, S.permute(0, 2, 1)).view((batchsize, num_channels,
height, width))
E = self.alpha * DS + A
return E
def initialize_weights(self):
for layer in [self.conv_B, self.conv_C, self.conv_D]:
nn.init.normal_(layer.weight.data, mean=0.0, std=0.02)
nn.init.constant_(layer.bias.data, 0.0)
nn.init.constant_(self.alpha.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
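# Shape walk-through (editorial note) for the get_inputs() tensor:
#     A: (4, 4, 4, 4), so N = H * W = 16
#     B, C, D: (4, 4, 16) after the 1x1 convs and reshape
#     S = softmax(C^T B): (4, 16, 16), one attention row per spatial position
#     D S^T: (4, 4, 16), reshaped back to (4, 4, 4, 4)
# so E = alpha * DS + A preserves the input shape.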
|
UpConcat2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/i3/ci3zxiw7jwdnzvz4mmk5mwwsff5vz4qx3uzzpitdind3hfy3i3vq.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %slice_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 64) % 8
x3 = (xindex // 512)
x4 = xindex % 64
x1 = (xindex // 8) % 8
x0 = xindex % 8
x7 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (64*x2) + (256*x3)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x2), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 8, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = (-2) + x1
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = (-2) + x0
tmp19 = tmp18 >= tmp1
tmp20 = tmp18 < tmp3
tmp21 = tmp16 & tmp17
tmp22 = tmp21 & tmp19
tmp23 = tmp22 & tmp20
tmp24 = tmp23 & tmp12
tmp25 = tl.load(in_ptr2 + ((-10) + x0 + (4*x1) + (16*((-4) + x2)) + (64*x3)), tmp24, other=0.0)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp12, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp11, tmp27)
tl.store(out_ptr0 + (x7), tmp28, None)
''', device_str='cuda')
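# Note (editorial): the kernel above fuses three steps of the UpConcat2d
# forward: the bias add + ReLU on the ConvTranspose2d output (output channels
# 0-3), and a zero-padded, center-aligned copy of the encoder tensor (channels
# 4-7, where the `-2 + x1` / `-2 + x0` offsets implement the pad from 4x4 to
# 8x8) into the concatenated (4, 8, 8, 8) buffer.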
# kernel path: runs/run_shard_4/inductor_cache/vt/cvtonbmisuslzwvf7wtu3i7uuhphfmfg3kvfexqxplfygxltvwg6.py
# Topologically Sorted Source Nodes: [conv_transpose2d, up], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# up => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
buf1 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 2048, grid=grid(2048), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv_transpose2d, up], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf0, primals_2, buf2, 1024, grid=grid(1024), stream=stream0)
del buf0
del primals_2
return (buf1, primals_1, primals_3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class UpConcat2d(nn.Module):
def __init__(self, in_channels_conv, out_channels_conv, scale_factor=2):
super(UpConcat2d, self).__init__()
self.in_channels_conv = in_channels_conv
self.out_channels_conv = out_channels_conv
self.scale_factor = scale_factor
self.up = nn.ConvTranspose2d(in_channels=self.in_channels_conv,
out_channels=self.out_channels_conv, kernel_size=2, stride=2,
padding=0)
if scale_factor == 4:
self.up2 = nn.ConvTranspose2d(in_channels=self.
out_channels_conv, out_channels=self.out_channels_conv,
kernel_size=2, stride=2, padding=0)
def forward(self, x_down, x_enc):
up = F.relu(self.up(x_down))
if self.scale_factor == 4:
up = F.relu(self.up2(up))
if up.shape[-1] > x_enc.shape[-1]:
p = (up.shape[-1] - x_enc.shape[-1]) // 2
if (up.shape[-1] - x_enc.shape[-1]) % 2 != 0:
p += 1
x_enc = F.pad(x_enc, (p, p, p, p))
start = [(x_enc.shape[-2] - up.shape[-2]) // 2, (x_enc.shape[-1] -
up.shape[-1]) // 2]
length = [up.shape[-2], up.shape[-1]]
crop = torch.narrow(torch.narrow(x_enc, dim=2, start=start[0],
length=length[0]), dim=3, start=start[1], length=length[1])
cat = torch.cat(tensors=(up, crop), dim=1)
return cat
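    # Shape sketch (illustrative, for the (4, 4, 4, 4) test inputs): the
    # stride-2 transposed conv makes `up` (4, 4, 8, 8); x_enc is padded by
    # p = 2 on every side, the centered (8, 8) crop is taken, and the
    # channel-wise cat yields (4, 8, 8, 8).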
def initialize_weights(self):
nn.init.normal_(self.up.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.up.bias.data, 0.0)
if self.scale_factor == 4:
nn.init.normal_(self.up2.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.up2.bias.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels_conv': 4, 'out_channels_conv': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
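    # Fused cat([relu(conv_out + bias), crop(pad(x_enc))], dim=1): x2 is the
    # output channel (0-3 come from the ReLU branch, 4-7 from the encoder
    # tensor read at a -2 spatial offset, i.e. the centered pad/crop), and
    # the tmp24 predicate masks out-of-bounds encoder reads with 0.0.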
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 64 % 8
x3 = xindex // 512
x4 = xindex % 64
x1 = xindex // 8 % 8
x0 = xindex % 8
x7 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 64 * x2 + 256 * x3), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x2, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp15 = -2 + x1
tmp16 = tmp15 >= tmp1
tmp17 = tmp15 < tmp3
tmp18 = -2 + x0
tmp19 = tmp18 >= tmp1
tmp20 = tmp18 < tmp3
tmp21 = tmp16 & tmp17
tmp22 = tmp21 & tmp19
tmp23 = tmp22 & tmp20
tmp24 = tmp23 & tmp12
tmp25 = tl.load(in_ptr2 + (-10 + x0 + 4 * x1 + 16 * (-4 + x2) + 64 * x3
), tmp24, other=0.0)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp12, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp11, tmp27)
tl.store(out_ptr0 + x7, tmp28, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 8, 8), (256, 64, 8, 1))
buf1 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(2048)](buf0, primals_2, primals_4, buf1,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(1024)](buf0
, primals_2, buf2, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3, buf2
class UpConcat2dNew(nn.Module):
def __init__(self, in_channels_conv, out_channels_conv, scale_factor=2):
super(UpConcat2dNew, self).__init__()
self.in_channels_conv = in_channels_conv
self.out_channels_conv = out_channels_conv
self.scale_factor = scale_factor
self.up = nn.ConvTranspose2d(in_channels=self.in_channels_conv,
out_channels=self.out_channels_conv, kernel_size=2, stride=2,
padding=0)
if scale_factor == 4:
self.up2 = nn.ConvTranspose2d(in_channels=self.
out_channels_conv, out_channels=self.out_channels_conv,
kernel_size=2, stride=2, padding=0)
def initialize_weights(self):
nn.init.normal_(self.up.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.up.bias.data, 0.0)
if self.scale_factor == 4:
nn.init.normal_(self.up2.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.up2.bias.data, 0.0)
def forward(self, input_0, input_1):
primals_1 = self.up.weight
primals_2 = self.up.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | UpConcat2d | false | 7,560 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels_conv, out_channels_conv, scale_factor=2):
super().__init__()
self.in_channels_conv = in_channels_conv
self.out_channels_conv = out_channels_conv
self.scale_factor = scale_factor
self.up = nn.ConvTranspose2d(in_channels=self.in_channels_conv,
out_channels=self.out_channels_conv, kernel_size=2, stride=2,
padding=0)
if scale_factor == 4:
self.up2 = nn.ConvTranspose2d(in_channels=self.
out_channels_conv, out_channels=self.out_channels_conv,
kernel_size=2, stride=2, padding=0)
def forward(self, x_down, x_enc):
up = F.relu(self.up(x_down))
if self.scale_factor == 4:
up = F.relu(self.up2(up))
if up.shape[-1] > x_enc.shape[-1]:
p = (up.shape[-1] - x_enc.shape[-1]) // 2
if (up.shape[-1] - x_enc.shape[-1]) % 2 != 0:
p += 1
x_enc = F.pad(x_enc, (p, p, p, p))
start = [(x_enc.shape[-2] - up.shape[-2]) // 2, (x_enc.shape[-1] -
up.shape[-1]) // 2]
length = [up.shape[-2], up.shape[-1]]
crop = torch.narrow(torch.narrow(x_enc, dim=2, start=start[0],
length=length[0]), dim=3, start=start[1], length=length[1])
cat = torch.cat(tensors=(up, crop), dim=1)
return cat
def initialize_weights(self):
nn.init.normal_(self.up.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.up.bias.data, 0.0)
if self.scale_factor == 4:
nn.init.normal_(self.up2.weight.data, mean=0.0, std=0.02)
nn.init.constant_(self.up2.bias.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DiscShiftLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/re/crek4nuwb2qmcio2vdn52756wis557jmlcfgmxomsorwtitkl7tg.py
# Topologically Sorted Source Nodes: [pow_1, loss, mul], Original ATen: [aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => mean
# mul => mul
# pow_1 => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.1), kwargs = {})
triton_per_fused_mean_mul_pow_0 = async_compile.triton('triton_per_fused_mean_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
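    # Single persistent-reduction program: loads all 256 elements, sums x * x,
    # divides by 256.0 for the mean, then scales by the 0.1 loss weight.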
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 256.0
tmp6 = tmp4 / tmp5
tmp7 = 0.1
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pow_1, loss, mul], Original ATen: [aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_pow_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiscShiftLoss(nn.Module):
"""Disc shift loss.
Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
"""
def __init__(self, loss_weight=0.1):
super(DiscShiftLoss, self).__init__()
self.loss_weight = loss_weight
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Tensor with shape (n, c, h, w)
Returns:
Tensor: Loss.
"""
loss = torch.mean(x ** 2)
return loss * self.loss_weight
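        # Equivalence sketch (illustrative): with the default loss_weight this
        # returns 0.1 * x.pow(2).mean(), matching the single fused reduction
        # in triton_per_fused_mean_mul_pow_0.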
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 256.0
tmp6 = tmp4 / tmp5
tmp7 = 0.1
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class DiscShiftLossNew(nn.Module):
"""Disc shift loss.
Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
"""
def __init__(self, loss_weight=0.1):
super(DiscShiftLossNew, self).__init__()
self.loss_weight = loss_weight
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| rivergold/mmediting | DiscShiftLoss | false | 7,561 | [
"Apache-2.0"
] | 1 | fd972635c48bb065db29d1b5090592a87c7263d2 | https://github.com/rivergold/mmediting/tree/fd972635c48bb065db29d1b5090592a87c7263d2 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Disc shift loss.
Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
"""
def __init__(self, loss_weight=0.1):
super().__init__()
self.loss_weight = loss_weight
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Tensor with shape (n, c, h, w)
Returns:
Tensor: Loss.
"""
loss = torch.mean(x ** 2)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nu/cnuc7ivckuuly7yn2763pwt3sw72jd6vuwpeeu4sfespm5iz7fq4.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fj/cfjl47pvhwbpfbvh6rfehwy5ijxc5p3zgkld2lwf3mw5bl6pbkak.py
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# weights => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 256, grid=grid(256), stream=stream0)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [weights], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (16, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7)
del primals_9
return (reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (16, 16), (16, 1), 0), primals_8, reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Attention(nn.Module):
def __init__(self, heads, dim, hidden_dim):
super().__init__()
self.dim = dim
self.hdim = hidden_dim
self.heads = heads
self.to_q = nn.Linear(dim, hidden_dim * heads)
self.to_k = nn.Linear(dim, hidden_dim * heads)
self.to_v = nn.Linear(dim, hidden_dim * heads)
self.project = nn.Linear(heads * hidden_dim, dim)
def forward(self, x):
B, T, _ = x.shape
Q = self.to_q(x).view(B, self.heads, T, self.hdim)
K = self.to_k(x).view(B, self.heads, T, self.hdim)
V = self.to_v(x).view(B, self.heads, T, self.hdim)
weights = torch.softmax(Q.permute(0, 1, 3, 2) @ K / self.hdim **
0.5, dim=-1)
out = weights @ V.permute(0, 1, 3, 2)
out = self.project(out.view(B, T, self.heads * self.hdim))
return out
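        # Shape note (illustrative): Q, K, V are (B, heads, T, hdim), so
        # Q.permute(0, 1, 3, 2) @ K contracts over the time axis and the
        # softmax weights are (B, heads, hdim, hdim); the test shapes use
        # T == hdim == 4, so the final view(B, T, heads * hdim) lines up.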
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'heads': 4, 'dim': 4, 'hidden_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
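    # Softmax pass 1 over rows of 4: computes exp((x - rowmax) * 0.5), where
    # the 0.5 folds in the 1 / sqrt(hidden_dim) = 1/2 attention scaling.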
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
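    # Softmax pass 2: normalizes each row of 4 exponentials by their sum.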
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (16, 4), (4, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (16, 4, 4), (16, 1, 4),
0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (16, 16),
(16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf7)
del primals_9
return reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf6, (16, 16), (16, 1), 0
), primals_8, reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf1, (16, 4, 4), (16, 1, 4), 0)
class AttentionNew(nn.Module):
def __init__(self, heads, dim, hidden_dim):
super().__init__()
self.dim = dim
self.hdim = hidden_dim
self.heads = heads
self.to_q = nn.Linear(dim, hidden_dim * heads)
self.to_k = nn.Linear(dim, hidden_dim * heads)
self.to_v = nn.Linear(dim, hidden_dim * heads)
self.project = nn.Linear(heads * hidden_dim, dim)
def forward(self, input_0):
primals_2 = self.to_q.weight
primals_3 = self.to_q.bias
primals_4 = self.to_k.weight
primals_5 = self.to_k.bias
primals_6 = self.to_v.weight
primals_7 = self.to_v.bias
primals_8 = self.project.weight
primals_9 = self.project.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| rish-16/audio-tf-pytorch | Attention | false | 7,562 | [
"MIT"
] | 1 | 397a6e9f1a97cce774202d392eb9706f0483405c | https://github.com/rish-16/audio-tf-pytorch/tree/397a6e9f1a97cce774202d392eb9706f0483405c | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, heads, dim, hidden_dim):
super().__init__()
self.dim = dim
self.hdim = hidden_dim
self.heads = heads
self.to_q = nn.Linear(dim, hidden_dim * heads)
self.to_k = nn.Linear(dim, hidden_dim * heads)
self.to_v = nn.Linear(dim, hidden_dim * heads)
self.project = nn.Linear(heads * hidden_dim, dim)
def forward(self, x):
B, T, _ = x.shape
Q = self.to_q(x).view(B, self.heads, T, self.hdim)
K = self.to_k(x).view(B, self.heads, T, self.hdim)
V = self.to_v(x).view(B, self.heads, T, self.hdim)
weights = torch.softmax(Q.permute(0, 1, 3, 2) @ K / self.hdim **
0.5, dim=-1)
out = weights @ V.permute(0, 1, 3, 2)
out = self.project(out.view(B, T, self.heads * self.hdim))
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CharbonnierCompLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lc/clcduj6g2ymqc7h3ffa4va3domvxzvdiu55ogmltuljc3midllwe.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, sub_1, pow_1, add_1, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# loss => sqrt
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# pred_merged => add
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
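    # Fuses compositing and the Charbonnier distance in one reduction:
    # merged = alpha * fg + (1 - alpha) * bg, then
    # mean(sqrt((merged - ori_merged)**2 + 1e-12)) scaled by loss_weight 1.0.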
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-12
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = tmp18 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, sub_1, pow_1, add_1, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', then averages over all the samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
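# Sample-wise sketch (illustrative): for (N, C, H, W) inputs, sample_wise=True
# divides each sample's weighted loss by that sample's own weight sum before
# averaging over N, instead of dividing the global loss sum by the global
# weight sum.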
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
    To use this decorator, the loss function must have a signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
    a signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target) ** 2 + eps)
class CharbonnierCompLoss(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', then averages over all the samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False,
eps=1e-12):
super(CharbonnierCompLoss, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged
image before normalized by ImageNet mean and std.
weight (Tensor, optional): of shape (N, 1, H, W). It is an
indicating matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * charbonnier_loss(pred_merged, ori_merged,
weight, eps=self.eps, reduction=self.reduction, sample_wise=
self.sample_wise)
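        # Worked form (illustrative): with weight=None and the defaults this
        # is mean(sqrt((alpha*fg + (1 - alpha)*bg - ori_merged)**2 + eps)),
        # which the fused Triton kernel in this record evaluates in one pass.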
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import functools
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-12
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = tmp18 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0[grid(1)](buf1,
arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', then averages over all the samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
    To use this decorator, the loss function must have a signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
    a signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
target ([type]): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target) ** 2 + eps)
class CharbonnierCompLossNew(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', then averages over all the samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False,
eps=1e-12):
super(CharbonnierCompLossNew, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| rivergold/mmediting | CharbonnierCompLoss | false | 7,563 | [
"Apache-2.0"
] | 1 | fd972635c48bb065db29d1b5090592a87c7263d2 | https://github.com/rivergold/mmediting/tree/fd972635c48bb065db29d1b5090592a87c7263d2 | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            per-sample with 'mean', then averages over all the samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
    To use this decorator, the loss function must have a signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
    a signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
target ([type]): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target) ** 2 + eps)
class Model(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean
# ... truncated (>4000 chars) for memory efficiency |
sAG | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ol/col24ddvbqyefqt5rfz2zjvznr2qnowz53pao4cjyjumxfxxw3h4.py
# Topologically Sorted Source Nodes: [enc], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# enc => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%getitem, %avg_pool3d, %convolution], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 3
x0 = xindex % 16
x2 = (xindex // 48)
x3 = xindex
tmp25 = tl.load(in_ptr3 + (0))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (64*x2) + (64*((-1) + x1))), tmp9 & xmask, other=0.0)
tmp11 = tl.load(in_ptr1 + (16 + x0 + (64*x2) + (64*((-1) + x1))), tmp9 & xmask, other=0.0)
tmp12 = tmp11 + tmp10
tmp13 = tl.load(in_ptr1 + (32 + x0 + (64*x2) + (64*((-1) + x1))), tmp9 & xmask, other=0.0)
tmp14 = tmp13 + tmp12
tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x2) + (64*((-1) + x1))), tmp9 & xmask, other=0.0)
tmp16 = tmp15 + tmp14
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp9, tmp18, tmp19)
tmp21 = tmp0 >= tmp7
tmp22 = tl.full([1], 3, tl.int64)
tmp23 = tmp0 < tmp22
tmp24 = tl.load(in_ptr2 + (x0 + (16*x2)), tmp21 & xmask, eviction_policy='evict_last', other=0.0)
tmp27 = tmp24 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp21, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp20, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + (x3), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uv/cuvmuwhltr4vdkrv4aeidlasov6zpjxspaoakfc6bklsgflly3f4.py
# Topologically Sorted Source Nodes: [enc_1, dec_1, add, out], Original ATen: [aten.convolution, aten.add, aten.sigmoid]
# Source node to ATen node mapping:
# add => add
# dec_1 => convolution_3
# enc_1 => convolution_1
# out => sigmoid
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_4, %primals_5, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_9, %primals_10, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %convolution_3), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
triton_poi_fused_add_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_add_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_out_ptr0 + (x0), xmask)
tmp5 = tl.load(in_ptr2 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = tmp3 + tmp7
tmp9 = tl.sigmoid(tmp8)
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (1, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, ), (1, ))
assert_size_stride(primals_9, (1, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_10, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [max_pool3d], Original ATen: [aten.max_pool3d_with_indices]
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [4, 1, 1], [4, 1, 1])
buf1 = buf0[0]
del buf0
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [enc], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf1, primals_1, buf3, primals_3, buf4, 192, grid=grid(192), stream=stream0)
del buf1
del buf3
del primals_3
# Topologically Sorted Source Nodes: [enc_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
# Topologically Sorted Source Nodes: [max_pool3d_1], Original ATen: [aten.max_pool3d_with_indices]
buf6 = torch.ops.aten.max_pool3d_with_indices.default(primals_6, [4, 1, 1], [4, 1, 1])
buf7 = buf6[0]
del buf6
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(primals_6, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 4, 4), (16, 16, 4, 1))
buf10 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dec], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(buf7, primals_6, buf9, primals_8, buf10, 192, grid=grid(192), stream=stream0)
del buf7
del buf9
del primals_8
# Topologically Sorted Source Nodes: [dec_1], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_9, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 4, 4), (16, 16, 4, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [enc_1, dec_1, add, out], Original ATen: [aten.convolution, aten.add, aten.sigmoid]
triton_poi_fused_add_convolution_sigmoid_1.run(buf12, buf5, primals_5, primals_10, 64, grid=grid(64), stream=stream0)
del buf5
del primals_10
del primals_5
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_7, primals_9, buf4, buf10, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class sAG(nn.Module):
def __init__(self, num_channels_in_enc, num_channels_in_dec):
super(sAG, self).__init__()
self.num_channels_in_enc = num_channels_in_enc
self.num_channels_in_dec = num_channels_in_dec
self.ch_max_pool_enc = nn.MaxPool3d(kernel_size=(self.
num_channels_in_enc, 1, 1))
self.ch_avg_pool_enc = nn.AvgPool3d(kernel_size=(self.
num_channels_in_enc, 1, 1))
self.conv1_enc = nn.Conv2d(in_channels=self.num_channels_in_enc,
out_channels=1, kernel_size=1, stride=1, padding=0)
self.conv2_enc = nn.Conv2d(in_channels=3, out_channels=1,
kernel_size=7, stride=1, padding=3)
self.ch_max_pool_dec = nn.MaxPool3d(kernel_size=(self.
num_channels_in_dec, 1, 1))
self.ch_avg_pool_dec = nn.AvgPool3d(kernel_size=(self.
num_channels_in_dec, 1, 1))
self.conv1_dec = nn.Conv2d(in_channels=self.num_channels_in_dec,
out_channels=1, kernel_size=1, stride=1, padding=0)
self.conv2_dec = nn.Conv2d(in_channels=3, out_channels=1,
kernel_size=7, stride=1, padding=3)
def forward(self, enc, dec):
enc = torch.cat(tensors=(self.ch_max_pool_enc(enc), self.
ch_avg_pool_enc(enc), self.conv1_enc(enc)), dim=1)
enc = self.conv2_enc(enc)
dec = torch.cat(tensors=(self.ch_max_pool_dec(dec), self.
ch_avg_pool_dec(dec), self.conv1_dec(dec)), dim=1)
dec = self.conv2_dec(dec)
out = torch.sigmoid(enc + dec)
return out
def initialize_weights(self):
for layer in [self.conv1_enc, self.conv1_dec, self.conv2_enc, self.
conv2_dec]:
nn.init.normal_(layer.weight.data, mean=0.0, std=0.02)
nn.init.constant_(layer.bias.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels_in_enc': 4, 'num_channels_in_dec': 4}]
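# Usage sketch (added; shapes follow get_inputs above). The gate collapses
# the channel axis three ways (channel max pool, channel average pool, 1x1
# conv), concatenates the three single-channel maps, and squashes them into
# a spatial attention map in (0, 1) with a 7x7 conv plus sigmoid:
#
# >>> gate = sAG(num_channels_in_enc=4, num_channels_in_dec=4)
# >>> enc, dec = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
# >>> attn = gate(enc, dec)  # single-channel map with values in (0, 1)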
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
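    # Added comment: x0 is the spatial offset within one 4x4 map, x1 selects
    # which of the three concatenated branches (max pool / avg pool / 1x1
    # conv) this element belongs to, x2 is the batch index, and x3 is the
    # flat output offset.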
tmp25 = tl.load(in_ptr3 + 0)
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 64 * x2 + 64 * (-1 + x1)), tmp9 & xmask,
other=0.0)
tmp11 = tl.load(in_ptr1 + (16 + x0 + 64 * x2 + 64 * (-1 + x1)), tmp9 &
xmask, other=0.0)
tmp12 = tmp11 + tmp10
tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x2 + 64 * (-1 + x1)), tmp9 &
xmask, other=0.0)
tmp14 = tmp13 + tmp12
tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x2 + 64 * (-1 + x1)), tmp9 &
xmask, other=0.0)
tmp16 = tmp15 + tmp14
tmp17 = 0.25
tmp18 = tmp16 * tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp9, tmp18, tmp19)
tmp21 = tmp0 >= tmp7
tl.full([1], 3, tl.int64)
tmp24 = tl.load(in_ptr2 + (x0 + 16 * x2), tmp21 & xmask,
eviction_policy='evict_last', other=0.0)
tmp27 = tmp24 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp21, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp20, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x3, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
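    # Added comment: this kernel fuses the two conv bias adds, the
    # encoder + decoder elementwise sum, and the final sigmoid in one pass.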
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_out_ptr0 + x0, xmask)
tmp5 = tl.load(in_ptr2 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp7 = tmp4 + tmp6
tmp8 = tmp3 + tmp7
tmp9 = tl.sigmoid(tmp8)
tl.store(in_out_ptr0 + x0, tmp9, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (1, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1,), (1,))
assert_size_stride(primals_9, (1, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_10, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.max_pool3d_with_indices.default(primals_1, [4,
1, 1], [4, 1, 1])
buf1 = buf0[0]
del buf0
buf3 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 1, 4, 4), (16, 16, 4, 1))
buf4 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(192)](buf1, primals_1, buf3, primals_3,
buf4, 192, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del buf3
del primals_3
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 1, 4, 4), (16, 16, 4, 1))
buf6 = torch.ops.aten.max_pool3d_with_indices.default(primals_6, [4,
1, 1], [4, 1, 1])
buf7 = buf6[0]
del buf6
buf9 = extern_kernels.convolution(primals_6, primals_7, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 1, 4, 4), (16, 16, 4, 1))
buf10 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
triton_poi_fused_cat_0[grid(192)](buf7, primals_6, buf9, primals_8,
buf10, 192, XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del buf9
del primals_8
buf11 = extern_kernels.convolution(buf10, primals_9, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 1, 4, 4), (16, 16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_add_convolution_sigmoid_1[grid(64)](buf12, buf5,
primals_5, primals_10, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf5
del primals_10
del primals_5
return (buf12, primals_1, primals_2, primals_4, primals_6, primals_7,
primals_9, buf4, buf10, buf12)
class sAGNew(nn.Module):
def __init__(self, num_channels_in_enc, num_channels_in_dec):
super(sAGNew, self).__init__()
self.num_channels_in_enc = num_channels_in_enc
self.num_channels_in_dec = num_channels_in_dec
self.ch_max_pool_enc = nn.MaxPool3d(kernel_size=(self.
num_channels_in_enc, 1, 1))
self.ch_avg_pool_enc = nn.AvgPool3d(kernel_size=(self.
num_channels_in_enc, 1, 1))
self.conv1_enc = nn.Conv2d(in_channels=self.num_channels_in_enc,
out_channels=1, kernel_size=1, stride=1, padding=0)
self.conv2_enc = nn.Conv2d(in_channels=3, out_channels=1,
kernel_size=7, stride=1, padding=3)
self.ch_max_pool_dec = nn.MaxPool3d(kernel_size=(self.
num_channels_in_dec, 1, 1))
self.ch_avg_pool_dec = nn.AvgPool3d(kernel_size=(self.
num_channels_in_dec, 1, 1))
self.conv1_dec = nn.Conv2d(in_channels=self.num_channels_in_dec,
out_channels=1, kernel_size=1, stride=1, padding=0)
self.conv2_dec = nn.Conv2d(in_channels=3, out_channels=1,
kernel_size=7, stride=1, padding=3)
def initialize_weights(self):
for layer in [self.conv1_enc, self.conv1_dec, self.conv2_enc, self.
conv2_dec]:
nn.init.normal_(layer.weight.data, mean=0.0, std=0.02)
nn.init.constant_(layer.bias.data, 0.0)
def forward(self, input_0, input_1):
primals_2 = self.conv1_enc.weight
primals_3 = self.conv1_enc.bias
primals_4 = self.conv2_enc.weight
primals_5 = self.conv2_enc.bias
primals_7 = self.conv1_dec.weight
primals_8 = self.conv1_dec.bias
primals_9 = self.conv2_dec.weight
primals_10 = self.conv2_dec.bias
primals_1 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | sAG | false | 7,564 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_channels_in_enc, num_channels_in_dec):
super().__init__()
self.num_channels_in_enc = num_channels_in_enc
self.num_channels_in_dec = num_channels_in_dec
self.ch_max_pool_enc = nn.MaxPool3d(kernel_size=(self.
num_channels_in_enc, 1, 1))
self.ch_avg_pool_enc = nn.AvgPool3d(kernel_size=(self.
num_channels_in_enc, 1, 1))
self.conv1_enc = nn.Conv2d(in_channels=self.num_channels_in_enc,
out_channels=1, kernel_size=1, stride=1, padding=0)
self.conv2_enc = nn.Conv2d(in_channels=3, out_channels=1,
kernel_size=7, stride=1, padding=3)
self.ch_max_pool_dec = nn.MaxPool3d(kernel_size=(self.
num_channels_in_dec, 1, 1))
self.ch_avg_pool_dec = nn.AvgPool3d(kernel_size=(self.
num_channels_in_dec, 1, 1))
self.conv1_dec = nn.Conv2d(in_channels=self.num_channels_in_dec,
out_channels=1, kernel_size=1, stride=1, padding=0)
self.conv2_dec = nn.Conv2d(in_channels=3, out_channels=1,
kernel_size=7, stride=1, padding=3)
def forward(self, enc, dec):
enc = torch.cat(tensors=(self.ch_max_pool_enc(enc), self.
ch_avg_pool_enc(enc), self.conv1_enc(enc)), dim=1)
enc = self.conv2_enc(enc)
dec = torch.cat(tensors=(self.ch_max_pool_dec(dec), self.
ch_avg_pool_dec(dec), self.conv1_dec(dec)), dim=1)
dec = self.conv2_dec(dec)
out = torch.sigmoid(enc + dec)
return out
def initialize_weights(self):
for layer in [self.conv1_enc, self.conv1_dec, self.conv2_enc, self.
conv2_dec]:
nn.init.normal_(layer.weight.data, mean=0.0, std=0.02)
nn.init.constant_(layer.bias.data, 0.0)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
L1CompositionLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/am/camwpeehokwg5kta3nxp4pdqaj4l6nldns4yhguxa3yy3jfhld4k.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, sub_1
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pred_merged => add
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_abs_add_mean_mul_rsub_sub_0 = async_compile.triton('triton_per_fused_abs_add_mean_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mean_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mean_mul_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.abs(tmp9)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_mean_mul_rsub_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduce the loss per-sample with 'mean', and then take the mean
            over all samples. Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
class L1CompositionLoss(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduce the loss per-sample with 'mean', and then take the mean
            over all samples. Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super(L1CompositionLoss, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
            raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones"
                " are: 'none', 'mean' and 'sum'.")
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the
                original merged image before being normalized by the
                ImageNet mean and std.
weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * l1_loss(pred_merged, ori_merged, weight,
reduction=self.reduction, sample_wise=self.sample_wise)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
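# Illustrative usage (added; shapes follow get_inputs above). The forward
# pass first composites pred_merged = pred_alpha * fg + (1 - pred_alpha) * bg
# and then takes the (optionally weighted) L1 distance to ori_merged:
#
# >>> crit = L1CompositionLoss()
# >>> alpha, fg, bg, gt = (torch.rand(4, 4, 4, 4) for _ in range(4))
# >>> crit(alpha, fg, bg, gt)  # scalar tensor under 'mean' reduction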
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mean_mul_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.abs(tmp9)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_mean_mul_rsub_sub_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduce the loss per-sample with 'mean', and then take the mean
            over all samples. Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
class L1CompositionLossNew(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduce the loss per-sample with 'mean', and then take the mean
            over all samples. Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super(L1CompositionLossNew, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
            raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones"
                " are: 'none', 'mean' and 'sum'.")
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| rivergold/mmediting | L1CompositionLoss | false | 7,565 | [
"Apache-2.0"
] | 1 | fd972635c48bb065db29d1b5090592a87c7263d2 | https://github.com/rivergold/mmediting/tree/fd972635c48bb065db29d1b5090592a87c7263d2 | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and
            `weight` (argument of `forward()`) is not None. It will first
            reduce the loss per-sample with 'mean', and then take the mean
            over all samples. Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
class Model(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `for
# ... truncated (>4000 chars) for memory efficiency |
DeepSupervisionModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yv/cyvap7j3rcqrtuv3wrc3n4rlhc4wagsezo7s4lrfe53ili5imvei.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# Graph fragment:
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 0.42857142857142855), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64)
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + (4*tmp10) + (16*x2)), xmask, eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + (4*tmp6) + (16*x2)), xmask, eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + (x4), tmp39, xmask)
''', device_str='cuda')
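# Reader's note (added): the constant 0.42857142857142855 is 3/7 =
# (in_size - 1) / (out_size - 1) for this 4 -> 8 bilinear upsample, i.e. the
# align_corners=True source-coordinate mapping. An eager-mode sketch of the
# same resize, assuming a 4D float tensor `x`:
#
#   import torch.nn.functional as F
#   y = F.interpolate(x, size=(8, 8), mode='bilinear', align_corners=True)
#
# The kernel above inlines the corresponding gather + lerp arithmetic.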
# kernel path: runs/run_shard_4/inductor_cache/km/ckmejwyh54enpg64wjr3q5vtz5xhxvr6ss5bn4i72xzdlbdlifi7.py
# Topologically Sorted Source Nodes: [cat, out_2], Original ATen: [aten.cat, aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# cat => cat
# out_2 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_7, add_8, clamp_max_6, clamp_min_5, clamp_min_6, convert_element_type_6, convert_element_type_7, iota_3, mul_6, mul_7, mul_8, sub_5, sub_6, sub_7
# Graph fragment:
# %cat : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%arg1_1, %slice_2], 1), kwargs = {})
# %iota_3 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_3, torch.float32), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_6, 0.42857142857142855), kwargs = {})
# %clamp_min_5 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_6, 0.0), kwargs = {})
# %convert_element_type_7 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_5, torch.int64), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_5, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_6), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_6), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64) % 8
x3 = (xindex // 512)
x5 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tmp15 + tmp7
tmp17 = triton_helpers.minimum(tmp16, tmp9)
tmp18 = x2
tmp19 = tl.full([1], 0, tl.int64)
tmp20 = tmp18 >= tmp19
tmp21 = tl.full([1], 4, tl.int64)
tmp22 = tmp18 < tmp21
tmp23 = tl.load(in_ptr0 + (tmp17 + (4*tmp10) + (16*x2) + (64*x3)), tmp22, eviction_policy='evict_last', other=0.0)
tmp24 = tmp18 >= tmp21
tmp25 = tl.full([1], 8, tl.int64)
tmp26 = tmp18 < tmp25
tmp27 = tl.load(in_ptr1 + (18 + tmp17 + (8*tmp10) + (64*((-4) + x2)) + (256*x3)), tmp24, eviction_policy='evict_last', other=0.0)
tmp28 = tl.where(tmp22, tmp23, tmp27)
tmp29 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2) + (64*x3)), tmp22, eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + (18 + tmp15 + (8*tmp10) + (64*((-4) + x2)) + (256*x3)), tmp24, eviction_policy='evict_last', other=0.0)
tmp31 = tl.where(tmp22, tmp29, tmp30)
tmp32 = tmp28 - tmp31
tmp33 = tmp15.to(tl.float32)
tmp34 = tmp14 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = 1.0
tmp37 = triton_helpers.minimum(tmp35, tmp36)
tmp38 = tmp32 * tmp37
tmp39 = tmp31 + tmp38
tmp40 = tl.load(in_ptr0 + (tmp17 + (4*tmp6) + (16*x2) + (64*x3)), tmp22, eviction_policy='evict_last', other=0.0)
tmp41 = tl.load(in_ptr1 + (18 + tmp17 + (8*tmp6) + (64*((-4) + x2)) + (256*x3)), tmp24, eviction_policy='evict_last', other=0.0)
tmp42 = tl.where(tmp22, tmp40, tmp41)
tmp43 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2) + (64*x3)), tmp22, eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr1 + (18 + tmp15 + (8*tmp6) + (64*((-4) + x2)) + (256*x3)), tmp24, eviction_policy='evict_last', other=0.0)
tmp45 = tl.where(tmp22, tmp43, tmp44)
tmp46 = tmp42 - tmp45
tmp47 = tmp46 * tmp37
tmp48 = tmp45 + tmp47
tl.store(in_out_ptr0 + (x5), tmp39, None)
tl.store(in_out_ptr1 + (x5), tmp48, None)
''', device_str='cuda')
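# Editor's note (hedged sketch): kernels 1 and 2 jointly realize the second
# pipeline stage -- torch.cat((dec3, crop), dim=1) followed by a 2x bilinear
# upsample. This kernel emits the two row-interpolated planes (add_7 / add_8)
# and leaves the final y-direction blend to the next kernel. The constant
# 0.42857142857142855 is 3/7 = (in - 1) / (out - 1) for a 4 -> 8 resize, the
# source-coordinate scale of nn.UpsamplingBilinear2d (align_corners=True); the
# `18 + x + 8 * y` addressing into in_ptr1 is a (2, 2) offset into an 8x8
# plane, i.e. the center-crop start = (8 - 4) // 2. An eager-mode reference of
# the stage's math, with illustrative tensor names:
def _reference_cat_upsample(dec3_4x4, up_8x8):
    crop = up_8x8[:, :, 2:6, 2:6]                  # center crop, start = 2
    cat = torch.cat((dec3_4x4, crop), dim=1)       # (N, 8, 4, 4)
    return torch.nn.functional.interpolate(
        cat, scale_factor=2, mode='bilinear', align_corners=True)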
# kernel path: runs/run_shard_4/inductor_cache/zi/czipwlrer7yngruenvrqslolhgcsx5r3sl55ve6v5vavpgceuobi.py
# Topologically Sorted Source Nodes: [cat_1, out_4], Original ATen: [aten.cat, aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# cat_1 => cat_1
# out_4 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_12, add_13, clamp_max_10, clamp_max_11, clamp_min_10, clamp_min_11, clamp_min_9, convert_element_type_10, convert_element_type_11, convert_element_type_9, iota_5, mul_11, mul_12, mul_13, mul_14, sub_10, sub_11, sub_12, sub_13, sub_14
# Graph fragment:
# %cat_1 : [num_users=4] = call_function[target=torch.ops.aten.cat.default](args = ([%arg2_1, %slice_4], 1), kwargs = {})
# %convert_element_type_9 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
# %iota_5 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_10 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_5, torch.float32), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_10, 0.42857142857142855), kwargs = {})
# %clamp_min_9 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_11, 0.0), kwargs = {})
# %convert_element_type_11 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_9, torch.int64), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_9, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_10), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%cat_1, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_10), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_13, %add_12), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %convert_element_type_9), kwargs = {})
# %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_13, 0.0), kwargs = {})
# %clamp_max_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_11, 1.0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_11), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x2 = (xindex // 64) % 12
x3 = (xindex // 768)
x6 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tmp15 + tmp7
tmp17 = triton_helpers.minimum(tmp16, tmp9)
tmp18 = x2
tmp19 = tl.full([1], 0, tl.int64)
tmp20 = tmp18 >= tmp19
tmp21 = tl.full([1], 4, tl.int64)
tmp22 = tmp18 < tmp21
tmp23 = tl.load(in_ptr0 + (tmp17 + (4*tmp10) + (16*x2) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp18 >= tmp21
tmp25 = tl.full([1], 12, tl.int64)
tmp26 = tmp18 < tmp25
tmp27 = tl.load(in_ptr1 + (18 + tmp17 + (8*tmp10) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr2 + (18 + tmp17 + (8*tmp10) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp28 - tmp27
tmp30 = tl.broadcast_to(2 + tmp10, [XBLOCK])
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp31 * tmp2
tmp33 = triton_helpers.maximum(tmp32, tmp4)
tmp34 = tmp33.to(tl.int32)
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp33 - tmp35
tmp37 = triton_helpers.maximum(tmp36, tmp4)
tmp38 = 1.0
tmp39 = triton_helpers.minimum(tmp37, tmp38)
tmp40 = tmp29 * tmp39
tmp41 = tmp27 + tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp24, tmp41, tmp42)
tmp44 = tl.where(tmp22, tmp23, tmp43)
tmp45 = tl.load(in_ptr0 + (tmp15 + (4*tmp10) + (16*x2) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (18 + tmp15 + (8*tmp10) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr2 + (18 + tmp15 + (8*tmp10) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp47 - tmp46
tmp49 = tmp48 * tmp39
tmp50 = tmp46 + tmp49
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp24, tmp50, tmp51)
tmp53 = tl.where(tmp22, tmp45, tmp52)
tmp54 = tl.load(in_ptr0 + (tmp17 + (4*tmp6) + (16*x2) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp55 = tl.load(in_ptr1 + (18 + tmp17 + (8*tmp6) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr2 + (18 + tmp17 + (8*tmp6) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tmp56 - tmp55
tmp58 = tl.broadcast_to(2 + tmp6, [XBLOCK])
tmp59 = tmp58.to(tl.float32)
tmp60 = tmp59 * tmp2
tmp61 = triton_helpers.maximum(tmp60, tmp4)
tmp62 = tmp61.to(tl.int32)
tmp63 = tmp62.to(tl.float32)
tmp64 = tmp61 - tmp63
tmp65 = triton_helpers.maximum(tmp64, tmp4)
tmp66 = triton_helpers.minimum(tmp65, tmp38)
tmp67 = tmp57 * tmp66
tmp68 = tmp55 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp24, tmp68, tmp69)
tmp71 = tl.where(tmp22, tmp54, tmp70)
tmp72 = tl.load(in_ptr0 + (tmp15 + (4*tmp6) + (16*x2) + (64*x3)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp73 = tl.load(in_ptr1 + (18 + tmp15 + (8*tmp6) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.load(in_ptr2 + (18 + tmp15 + (8*tmp6) + (64*((-4) + x2)) + (512*x3)), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tmp74 - tmp73
tmp76 = tmp75 * tmp66
tmp77 = tmp73 + tmp76
tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype)
tmp79 = tl.where(tmp24, tmp77, tmp78)
tmp80 = tl.where(tmp22, tmp72, tmp79)
tmp81 = tmp44 - tmp53
tmp82 = tmp15.to(tl.float32)
tmp83 = tmp14 - tmp82
tmp84 = triton_helpers.maximum(tmp83, tmp4)
tmp85 = triton_helpers.minimum(tmp84, tmp38)
tmp86 = tmp81 * tmp85
tmp87 = tmp53 + tmp86
tmp88 = tmp71 - tmp80
tmp89 = tmp88 * tmp85
tmp90 = tmp80 + tmp89
tmp91 = tmp87 - tmp90
tmp92 = tmp6.to(tl.float32)
tmp93 = tmp5 - tmp92
tmp94 = triton_helpers.maximum(tmp93, tmp4)
tmp95 = triton_helpers.minimum(tmp94, tmp38)
tmp96 = tmp91 * tmp95
tl.store(out_ptr1 + (x6), tmp71, xmask)
tl.store(out_ptr2 + (x6), tmp80, xmask)
tl.store(in_out_ptr0 + (x6), tmp96, xmask)
''', device_str='cuda')
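# Editor's note: kernel 2 above plays a double role -- it finishes the previous
# stage's y-direction blend for the cropped region (the `2 + tmp10` / `2 + tmp6`
# rows account for the crop offset) and starts the next bilinear upsample of the
# 12-channel cat, emitting two partially interpolated planes (out_ptr1/out_ptr2)
# plus the residual blend term mul_14 (in_out_ptr0) that the final kernel folds in.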
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwhkkigkqcwthccttxm2auz3imjlysypx2bak5ym4zdgiskebvl.py
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_6 => cat_2
# Graph fragment:
# %cat_2 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg3_1, %slice_6], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 16
x3 = (xindex // 256)
x4 = xindex % 16
x0 = xindex % 4
x1 = (xindex // 4) % 4
x6 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 16, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (18 + x0 + (8*x1) + (64*((-4) + x2)) + (768*x3)), tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + (18 + x0 + (8*x1) + (64*((-4) + x2)) + (768*x3)), tmp6 & xmask, other=0.0)
tmp11 = tmp10 - tmp9
tmp12 = 2 + x0
tmp13 = tmp12.to(tl.float32)
tmp14 = 0.42857142857142855
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp17.to(tl.int32)
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 - tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp16)
tmp22 = 1.0
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tmp11 * tmp23
tmp25 = tmp9 + tmp24
tmp26 = tl.load(in_ptr3 + (18 + x0 + (8*x1) + (64*((-4) + x2)) + (768*x3)), tmp6 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp6, tmp27, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + (x6), tmp30, xmask)
''', device_str='cuda')
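# Editor's note: the cat kernel above produces the module's final output -- it
# concatenates arg3_1 (dec1, 4 channels) with a (2, 2) center crop of the
# 12-channel upsampled map, completing the last bilinear blend on the fly
# (tmp25 + tmp26 folds in the mul_14 residual from the previous kernel) for a
# (4, 16, 4, 4) result.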
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(buf1, arg0_1, 1024, grid=grid(1024), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
buf3 = buf2; del buf2 # reuse
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [cat, out_2], Original ATen: [aten.cat, aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1.run(buf4, buf6, arg1_1, buf1, 2048, grid=grid(2048), stream=stream0)
del arg1_1
buf7 = empty_strided_cuda((4, 12, 8, 8), (768, 64, 8, 1), torch.float32)
buf9 = empty_strided_cuda((4, 12, 8, 8), (768, 64, 8, 1), torch.float32)
buf10 = empty_strided_cuda((4, 12, 8, 8), (768, 64, 8, 1), torch.float32)
buf11 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [cat_1, out_4], Original ATen: [aten.cat, aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2.run(buf11, arg2_1, buf6, buf4, buf9, buf10, 3072, grid=grid(3072), stream=stream0)
del arg2_1
del buf4
del buf6
buf12 = reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(arg3_1, buf10, buf9, buf11, buf12, 1024, grid=grid(1024), stream=stream0)
del arg3_1
del buf10
del buf11
del buf9
return (buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DeepSupervisionModule(nn.Module):
def __init__(self, up_sampling_factors=(2, 2, 2)):
super(DeepSupervisionModule, self).__init__()
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
self.up_sampling_factors = up_sampling_factors
def forward(self, dec4, dec3, dec2, dec1):
out = self.up(dec4)
if self.up_sampling_factors[0] == 4:
out = self.up(out)
start = [(out.shape[-2] - dec3.shape[-2]) // 2, (out.shape[-1] -
dec3.shape[-1]) // 2]
length = [dec3.shape[-2], dec3.shape[-1]]
out = torch.narrow(torch.narrow(out, dim=2, start=start[0], length=
length[0]), dim=3, start=start[1], length=length[1])
out = self.up(torch.cat(tensors=(dec3, out), dim=1))
if self.up_sampling_factors[1] == 4:
out = self.up(out)
start = [(out.shape[-2] - dec2.shape[-2]) // 2, (out.shape[-1] -
dec2.shape[-1]) // 2]
length = [dec2.shape[-2], dec2.shape[-1]]
out = torch.narrow(torch.narrow(out, dim=2, start=start[0], length=
length[0]), dim=3, start=start[1], length=length[1])
out = self.up(torch.cat(tensors=(dec2, out), dim=1))
if self.up_sampling_factors[2] == 4:
out = self.up(out)
start = [(out.shape[-2] - dec1.shape[-2]) // 2, (out.shape[-1] -
dec1.shape[-1]) // 2]
length = [dec1.shape[-2], dec1.shape[-1]]
out = torch.narrow(torch.narrow(out, dim=2, start=start[0], length=
length[0]), dim=3, start=start[1], length=length[1])
out = torch.cat(tensors=(dec1, out), dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
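# Editor's note (hedged shape check): with the default (2, 2, 2) factors and
# the 4x4x4x4 sample inputs above, each stage upsamples 4x4 -> 8x8 and then
# center-crops back to the skip tensor's size with start = (8 - 4) // 2 = 2,
# so torch.narrow(out, dim=2, start=2, length=4) keeps rows 2..5.
def _demo_shapes():
    m = DeepSupervisionModule()
    out = m(*get_inputs())
    assert tuple(out.shape) == (4, 16, 4, 4)  # dec1's 4 channels + 12 carried
    return out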
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(
in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tmp15 + tmp7
tmp18 = triton_helpers.minimum(tmp17, tmp9)
tmp19 = tl.load(in_ptr0 + (tmp18 + 4 * tmp10 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp20 = tmp19 - tmp16
tmp21 = tmp15.to(tl.float32)
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp4)
tmp24 = 1.0
tmp25 = triton_helpers.minimum(tmp23, tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp16 + tmp26
tmp28 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (tmp18 + 4 * tmp6 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp30 = tmp29 - tmp28
tmp31 = tmp30 * tmp25
tmp32 = tmp28 + tmp31
tmp33 = tmp27 - tmp32
tmp34 = tmp6.to(tl.float32)
tmp35 = tmp5 - tmp34
tmp36 = triton_helpers.maximum(tmp35, tmp4)
tmp37 = triton_helpers.minimum(tmp36, tmp24)
tmp38 = tmp33 * tmp37
tmp39 = tmp32 + tmp38
tl.store(in_out_ptr0 + x4, tmp39, xmask)
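# Editor's note: the kernel above is a 2x align_corners bilinear upsample of a
# 4x4 plane written as two nested lerps; with fx/fy the clamped fractional
# source offsets and v[yi][xi] the four neighbours:
#     top    = v[y0][x0] + (v[y0][x1] - v[y0][x0]) * fx
#     bottom = v[y1][x0] + (v[y1][x1] - v[y1][x0]) * fx
#     out    = top + (bottom - top) * fy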
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
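    # note: the bare tl.full(...) expressions in this de-duplicated listing are
    # leftovers of dead-code elimination -- the all-True mask (xnumel is a
    # multiple of the launch size) and the branch upper bounds are constant
    # here and their values go unused.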
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64 % 8
x3 = xindex // 512
x5 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tmp15 + tmp7
tmp17 = triton_helpers.minimum(tmp16, tmp9)
tmp18 = x2
tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 4, tl.int64)
tmp22 = tmp18 < tmp21
tmp23 = tl.load(in_ptr0 + (tmp17 + 4 * tmp10 + 16 * x2 + 64 * x3),
tmp22, eviction_policy='evict_last', other=0.0)
tmp24 = tmp18 >= tmp21
tl.full([1], 8, tl.int64)
tmp27 = tl.load(in_ptr1 + (18 + tmp17 + 8 * tmp10 + 64 * (-4 + x2) +
256 * x3), tmp24, eviction_policy='evict_last', other=0.0)
tmp28 = tl.where(tmp22, tmp23, tmp27)
tmp29 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2 + 64 * x3),
tmp22, eviction_policy='evict_last', other=0.0)
tmp30 = tl.load(in_ptr1 + (18 + tmp15 + 8 * tmp10 + 64 * (-4 + x2) +
256 * x3), tmp24, eviction_policy='evict_last', other=0.0)
tmp31 = tl.where(tmp22, tmp29, tmp30)
tmp32 = tmp28 - tmp31
tmp33 = tmp15.to(tl.float32)
tmp34 = tmp14 - tmp33
tmp35 = triton_helpers.maximum(tmp34, tmp4)
tmp36 = 1.0
tmp37 = triton_helpers.minimum(tmp35, tmp36)
tmp38 = tmp32 * tmp37
tmp39 = tmp31 + tmp38
tmp40 = tl.load(in_ptr0 + (tmp17 + 4 * tmp6 + 16 * x2 + 64 * x3), tmp22,
eviction_policy='evict_last', other=0.0)
tmp41 = tl.load(in_ptr1 + (18 + tmp17 + 8 * tmp6 + 64 * (-4 + x2) + 256 *
x3), tmp24, eviction_policy='evict_last', other=0.0)
tmp42 = tl.where(tmp22, tmp40, tmp41)
tmp43 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2 + 64 * x3), tmp22,
eviction_policy='evict_last', other=0.0)
tmp44 = tl.load(in_ptr1 + (18 + tmp15 + 8 * tmp6 + 64 * (-4 + x2) + 256 *
x3), tmp24, eviction_policy='evict_last', other=0.0)
tmp45 = tl.where(tmp22, tmp43, tmp44)
tmp46 = tmp42 - tmp45
tmp47 = tmp46 * tmp37
tmp48 = tmp45 + tmp47
tl.store(in_out_ptr0 + x5, tmp39, None)
tl.store(in_out_ptr1 + x5, tmp48, None)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr1, out_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x2 = xindex // 64 % 12
x3 = xindex // 768
x6 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.42857142857142855
tmp3 = tmp1 * tmp2
tmp4 = 0.0
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp6 = tmp5.to(tl.int32)
tmp7 = tl.full([1], 1, tl.int64)
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 3, tl.int64)
tmp10 = triton_helpers.minimum(tmp8, tmp9)
tmp11 = x0
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp12 * tmp2
tmp14 = triton_helpers.maximum(tmp13, tmp4)
tmp15 = tmp14.to(tl.int32)
tmp16 = tmp15 + tmp7
tmp17 = triton_helpers.minimum(tmp16, tmp9)
tmp18 = x2
tl.full([1], 0, tl.int64)
tmp21 = tl.full([1], 4, tl.int64)
tmp22 = tmp18 < tmp21
tmp23 = tl.load(in_ptr0 + (tmp17 + 4 * tmp10 + 16 * x2 + 64 * x3),
tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp18 >= tmp21
tl.full([1], 12, tl.int64)
tmp27 = tl.load(in_ptr1 + (18 + tmp17 + 8 * tmp10 + 64 * (-4 + x2) +
512 * x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tl.load(in_ptr2 + (18 + tmp17 + 8 * tmp10 + 64 * (-4 + x2) +
512 * x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tmp28 - tmp27
tmp30 = tl.broadcast_to(2 + tmp10, [XBLOCK])
tmp31 = tmp30.to(tl.float32)
tmp32 = tmp31 * tmp2
tmp33 = triton_helpers.maximum(tmp32, tmp4)
tmp34 = tmp33.to(tl.int32)
tmp35 = tmp34.to(tl.float32)
tmp36 = tmp33 - tmp35
tmp37 = triton_helpers.maximum(tmp36, tmp4)
tmp38 = 1.0
tmp39 = triton_helpers.minimum(tmp37, tmp38)
tmp40 = tmp29 * tmp39
tmp41 = tmp27 + tmp40
tmp42 = tl.full(tmp41.shape, 0.0, tmp41.dtype)
tmp43 = tl.where(tmp24, tmp41, tmp42)
tmp44 = tl.where(tmp22, tmp23, tmp43)
tmp45 = tl.load(in_ptr0 + (tmp15 + 4 * tmp10 + 16 * x2 + 64 * x3),
tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp46 = tl.load(in_ptr1 + (18 + tmp15 + 8 * tmp10 + 64 * (-4 + x2) +
512 * x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr2 + (18 + tmp15 + 8 * tmp10 + 64 * (-4 + x2) +
512 * x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp47 - tmp46
tmp49 = tmp48 * tmp39
tmp50 = tmp46 + tmp49
tmp51 = tl.full(tmp50.shape, 0.0, tmp50.dtype)
tmp52 = tl.where(tmp24, tmp50, tmp51)
tmp53 = tl.where(tmp22, tmp45, tmp52)
tmp54 = tl.load(in_ptr0 + (tmp17 + 4 * tmp6 + 16 * x2 + 64 * x3), tmp22 &
xmask, eviction_policy='evict_last', other=0.0)
tmp55 = tl.load(in_ptr1 + (18 + tmp17 + 8 * tmp6 + 64 * (-4 + x2) + 512 *
x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp56 = tl.load(in_ptr2 + (18 + tmp17 + 8 * tmp6 + 64 * (-4 + x2) + 512 *
x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp57 = tmp56 - tmp55
tmp58 = tl.broadcast_to(2 + tmp6, [XBLOCK])
tmp59 = tmp58.to(tl.float32)
tmp60 = tmp59 * tmp2
tmp61 = triton_helpers.maximum(tmp60, tmp4)
tmp62 = tmp61.to(tl.int32)
tmp63 = tmp62.to(tl.float32)
tmp64 = tmp61 - tmp63
tmp65 = triton_helpers.maximum(tmp64, tmp4)
tmp66 = triton_helpers.minimum(tmp65, tmp38)
tmp67 = tmp57 * tmp66
tmp68 = tmp55 + tmp67
tmp69 = tl.full(tmp68.shape, 0.0, tmp68.dtype)
tmp70 = tl.where(tmp24, tmp68, tmp69)
tmp71 = tl.where(tmp22, tmp54, tmp70)
tmp72 = tl.load(in_ptr0 + (tmp15 + 4 * tmp6 + 16 * x2 + 64 * x3), tmp22 &
xmask, eviction_policy='evict_last', other=0.0)
tmp73 = tl.load(in_ptr1 + (18 + tmp15 + 8 * tmp6 + 64 * (-4 + x2) + 512 *
x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp74 = tl.load(in_ptr2 + (18 + tmp15 + 8 * tmp6 + 64 * (-4 + x2) + 512 *
x3), tmp24 & xmask, eviction_policy='evict_last', other=0.0)
tmp75 = tmp74 - tmp73
tmp76 = tmp75 * tmp66
tmp77 = tmp73 + tmp76
tmp78 = tl.full(tmp77.shape, 0.0, tmp77.dtype)
tmp79 = tl.where(tmp24, tmp77, tmp78)
tmp80 = tl.where(tmp22, tmp72, tmp79)
tmp81 = tmp44 - tmp53
tmp82 = tmp15.to(tl.float32)
tmp83 = tmp14 - tmp82
tmp84 = triton_helpers.maximum(tmp83, tmp4)
tmp85 = triton_helpers.minimum(tmp84, tmp38)
tmp86 = tmp81 * tmp85
tmp87 = tmp53 + tmp86
tmp88 = tmp71 - tmp80
tmp89 = tmp88 * tmp85
tmp90 = tmp80 + tmp89
tmp91 = tmp87 - tmp90
tmp92 = tmp6.to(tl.float32)
tmp93 = tmp5 - tmp92
tmp94 = triton_helpers.maximum(tmp93, tmp4)
tmp95 = triton_helpers.minimum(tmp94, tmp38)
tmp96 = tmp91 * tmp95
tl.store(out_ptr1 + x6, tmp71, xmask)
tl.store(out_ptr2 + x6, tmp80, xmask)
tl.store(in_out_ptr0 + x6, tmp96, xmask)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 16
x3 = xindex // 256
x4 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x6 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 16, tl.int64)
tmp9 = tl.load(in_ptr1 + (18 + x0 + 8 * x1 + 64 * (-4 + x2) + 768 * x3),
tmp6 & xmask, other=0.0)
tmp10 = tl.load(in_ptr2 + (18 + x0 + 8 * x1 + 64 * (-4 + x2) + 768 * x3
), tmp6 & xmask, other=0.0)
tmp11 = tmp10 - tmp9
tmp12 = 2 + x0
tmp13 = tmp12.to(tl.float32)
tmp14 = 0.42857142857142855
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp17.to(tl.int32)
tmp19 = tmp18.to(tl.float32)
tmp20 = tmp17 - tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp16)
tmp22 = 1.0
tmp23 = triton_helpers.minimum(tmp21, tmp22)
tmp24 = tmp11 * tmp23
tmp25 = tmp9 + tmp24
tmp26 = tl.load(in_ptr3 + (18 + x0 + 8 * x1 + 64 * (-4 + x2) + 768 * x3
), tmp6 & xmask, other=0.0)
tmp27 = tmp25 + tmp26
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp6, tmp27, tmp28)
tmp30 = tl.where(tmp4, tmp5, tmp29)
tl.store(out_ptr0 + x6, tmp30, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(1024)](buf1, arg0_1, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
buf3 = buf2
del buf2
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 8, 8, 8), (512, 64, 8, 1), torch.float32)
buf6 = buf5
del buf5
triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_1[
grid(2048)](buf4, buf6, arg1_1, buf1, 2048, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
        buf7 = empty_strided_cuda((4, 12, 8, 8), (768, 64, 8, 1), torch.float32)
        buf9 = empty_strided_cuda((4, 12, 8, 8), (768, 64, 8, 1), torch.float32)
        buf10 = empty_strided_cuda((4, 12, 8, 8), (768, 64, 8, 1), torch.float32)
buf11 = buf7
del buf7
triton_poi_fused__to_copy__unsafe_index_add_arange_cat_clamp_mul_sub_2[
grid(3072)](buf11, arg2_1, buf6, buf4, buf9, buf10, 3072,
XBLOCK=128, num_warps=4, num_stages=1)
del arg2_1
del buf4
del buf6
buf12 = reinterpret_tensor(buf1, (4, 16, 4, 4), (256, 16, 4, 1), 0)
del buf1
triton_poi_fused_cat_3[grid(1024)](arg3_1, buf10, buf9, buf11,
buf12, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del arg3_1
del buf10
del buf11
del buf9
return buf12,
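# Editor's note: buffer flow in call() above -- buf1 = upsample(dec4);
# buf4/buf6 hold stage 2's two row-interpolated planes; buf9/buf10/buf11 hold
# stage 3's partial interpolations and residual blend term; the final cat
# kernel writes buf12, which reuses buf1's storage via reinterpret_tensor
# once buf1 is no longer needed.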
class DeepSupervisionModuleNew(nn.Module):
def __init__(self, up_sampling_factors=(2, 2, 2)):
super(DeepSupervisionModuleNew, self).__init__()
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
self.up_sampling_factors = up_sampling_factors
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets | DeepSupervisionModule | false | 7,566 | [
"MIT"
] | 1 | 75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | https://github.com/rinkwitz/Thesis_Semantic_Image_Segmentation_on_Satellite_Imagery_using_UNets/tree/75d3a4a536f6ef81fe0efd4f5fbba32b627a7472 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, up_sampling_factors=(2, 2, 2)):
super().__init__()
self.up = nn.UpsamplingBilinear2d(scale_factor=2)
self.up_sampling_factors = up_sampling_factors
def forward(self, dec4, dec3, dec2, dec1):
out = self.up(dec4)
if self.up_sampling_factors[0] == 4:
out = self.up(out)
start = [(out.shape[-2] - dec3.shape[-2]) // 2, (out.shape[-1] -
dec3.shape[-1]) // 2]
length = [dec3.shape[-2], dec3.shape[-1]]
out = torch.narrow(torch.narrow(out, dim=2, start=start[0], length=
length[0]), dim=3, start=start[1], length=length[1])
out = self.up(torch.cat(tensors=(dec3, out), dim=1))
if self.up_sampling_factors[1] == 4:
out = self.up(out)
start = [(out.shape[-2] - dec2.shape[-2]) // 2, (out.shape[-1] -
dec2.shape[-1]) // 2]
length = [dec2.shape[-2], dec2.shape[-1]]
out = torch.narrow(torch.narrow(out, dim=2, start=start[0], length=
length[0]), dim=3, start=start[1], length=length[1])
out = self.up(torch.cat(tensors=(dec2, out), dim=1))
if self.up_sampling_factors[2] == 4:
out = self.up(out)
start = [(out.shape[-2] - dec1.shape[-2]) // 2, (out.shape[-1] -
dec1.shape[-1]) // 2]
length = [dec1.shape[-2], dec1.shape[-1]]
out = torch.narrow(torch.narrow(out, dim=2, start=start[0], length=
length[0]), dim=3, start=start[1], length=length[1])
out = torch.cat(tensors=(dec1, out), dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# score_1 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# score_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
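# Editor's note: the two kernels above split a numerically stable softmax over
# the last (size-4) axis into two passes:
#     pass 1: e_i = exp(x_i - max_j x_j)
#     pass 2: softmax_i = e_i / sum_j e_j
# Subtracting the row max first keeps exp() from overflowing; the result is
# unchanged because softmax(x) == softmax(x - c) for any constant c. The eager
# equivalent is simply torch.softmax(score, dim=-1).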
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [score_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_8
return (reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0), buf4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 4), (16, 4, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
score_function='dot_product', dropout=0):
""" Attention Mechanism
        :param embed_dim: input embedding dimension
        :param hidden_dim: per-head hidden size (defaults to embed_dim // n_head)
        :param out_dim: output dimension (defaults to embed_dim)
        :param n_head: number of heads (Multi-Head Attention)
        :param score_function: dot_product / scaled_dot_product / mlp (concat) / bi_linear (general dot)
        :return: (?, q_len, out_dim,)
"""
super(Attention, self).__init__()
if hidden_dim is None:
hidden_dim = embed_dim // n_head
if out_dim is None:
out_dim = embed_dim
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_head = n_head
self.score_function = score_function
self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
self.proj = nn.Linear(n_head * hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
if score_function == 'mlp':
self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
elif self.score_function == 'bi_linear':
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_dim)
if self.weight is not None:
self.weight.data.uniform_(-stdv, stdv)
def forward(self, k, q):
if len(q.shape) == 2:
q = torch.unsqueeze(q, dim=1)
if len(k.shape) == 2:
k = torch.unsqueeze(k, dim=1)
mb_size = k.shape[0]
k_len = k.shape[1]
q_len = q.shape[1]
kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim)
kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.
hidden_dim)
qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim)
qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.
hidden_dim)
if self.score_function == 'dot_product':
kt = kx.permute(0, 2, 1)
score = torch.bmm(qx, kt)
elif self.score_function == 'scaled_dot_product':
kt = kx.permute(0, 2, 1)
qkt = torch.bmm(qx, kt)
score = torch.div(qkt, math.sqrt(self.hidden_dim))
elif self.score_function == 'mlp':
kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
kq = torch.cat((kxx, qxx), dim=-1)
score = F.tanh(torch.matmul(kq, self.weight))
elif self.score_function == 'bi_linear':
qw = torch.matmul(qx, self.weight)
kt = kx.permute(0, 2, 1)
score = torch.bmm(qw, kt)
else:
raise RuntimeError('invalid score_function')
score = F.softmax(score, dim=-1)
output = torch.bmm(score, kx)
output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)
output = self.proj(output)
output = self.dropout(output)
return output, score
def get_inputs():
return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
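# Usage sketch (hedged; shapes follow get_inputs above): with embed_dim=4 and
# the default single head, hidden_dim = 4 // 1 = 4 and the module returns an
# (output, score) pair.
def _demo_attention():
    attn = Attention(embed_dim=4)
    k, q = get_inputs()
    output, score = attn(k, q)  # output: (4, 4, 4), score: (4, 4, 4)
    return output, score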
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 4), (16, 4, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_2, (16,
4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_6, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_5
del primals_6
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = buf2
del buf2
triton_poi_fused__softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
extern_kernels.bmm(buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 4,
1), 0), out=buf5)
buf6 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_8, reinterpret_tensor(buf5, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf6)
del primals_8
return reinterpret_tensor(buf6, (4, 4, 4), (16, 4, 1), 0
), buf4, reinterpret_tensor(primals_2, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), buf4, reinterpret_tensor(buf5, (16, 4), (4, 1), 0
), primals_7, reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0)
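# Editor's note: mapping call() above back to the eager module -- buf0 = w_k
# projection (addmm), buf1 = w_q projection (addmm), buf2 = bmm(qx, kx^T) raw
# scores, buf3/buf4 = the two-pass softmax, buf5 = bmm(score, kx), buf6 =
# proj(...). With n_head=1 and dropout p=0, the head split/merge and dropout
# trace to no-ops and leave no kernels behind.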
class AttentionNew(nn.Module):
def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
score_function='dot_product', dropout=0):
""" Attention Mechanism
        :param embed_dim: input embedding dimension
        :param hidden_dim: per-head hidden size (defaults to embed_dim // n_head)
        :param out_dim: output dimension (defaults to embed_dim)
        :param n_head: number of heads (Multi-Head Attention)
        :param score_function: dot_product / scaled_dot_product / mlp (concat) / bi_linear (general dot)
        :return: (?, q_len, out_dim,)
"""
super(AttentionNew, self).__init__()
if hidden_dim is None:
hidden_dim = embed_dim // n_head
if out_dim is None:
out_dim = embed_dim
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_head = n_head
self.score_function = score_function
self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
self.proj = nn.Linear(n_head * hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
if score_function == 'mlp':
self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
elif self.score_function == 'bi_linear':
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_dim)
if self.weight is not None:
self.weight.data.uniform_(-stdv, stdv)
def forward(self, input_0, input_1):
primals_3 = self.w_k.weight
primals_4 = self.w_k.bias
primals_5 = self.w_q.weight
primals_6 = self.w_q.bias
primals_7 = self.proj.weight
primals_8 = self.proj.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0], output[1]
| rmarcacini/LC-ABSA | Attention | false | 7,567 | [
"MIT"
] | 1 | 90ae7f41b3766761005caf015292926127fe3949 | https://github.com/rmarcacini/LC-ABSA/tree/90ae7f41b3766761005caf015292926127fe3949 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, embed_dim, hidden_dim=None, out_dim=None, n_head=1,
score_function='dot_product', dropout=0):
""" Attention Mechanism
        :param embed_dim: input embedding dimension
        :param hidden_dim: per-head hidden size (defaults to embed_dim // n_head)
        :param out_dim: output dimension (defaults to embed_dim)
        :param n_head: number of heads (Multi-Head Attention)
        :param score_function: dot_product / scaled_dot_product / mlp (concat) / bi_linear (general dot)
        :return: (?, q_len, out_dim,)
"""
super().__init__()
if hidden_dim is None:
hidden_dim = embed_dim // n_head
if out_dim is None:
out_dim = embed_dim
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.n_head = n_head
self.score_function = score_function
self.w_k = nn.Linear(embed_dim, n_head * hidden_dim)
self.w_q = nn.Linear(embed_dim, n_head * hidden_dim)
self.proj = nn.Linear(n_head * hidden_dim, out_dim)
self.dropout = nn.Dropout(dropout)
if score_function == 'mlp':
self.weight = nn.Parameter(torch.Tensor(hidden_dim * 2))
elif self.score_function == 'bi_linear':
self.weight = nn.Parameter(torch.Tensor(hidden_dim, hidden_dim))
else:
self.register_parameter('weight', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.hidden_dim)
if self.weight is not None:
self.weight.data.uniform_(-stdv, stdv)
def forward(self, k, q):
if len(q.shape) == 2:
q = torch.unsqueeze(q, dim=1)
if len(k.shape) == 2:
k = torch.unsqueeze(k, dim=1)
mb_size = k.shape[0]
k_len = k.shape[1]
q_len = q.shape[1]
kx = self.w_k(k).view(mb_size, k_len, self.n_head, self.hidden_dim)
kx = kx.permute(2, 0, 1, 3).contiguous().view(-1, k_len, self.
hidden_dim)
qx = self.w_q(q).view(mb_size, q_len, self.n_head, self.hidden_dim)
qx = qx.permute(2, 0, 1, 3).contiguous().view(-1, q_len, self.
hidden_dim)
if self.score_function == 'dot_product':
kt = kx.permute(0, 2, 1)
score = torch.bmm(qx, kt)
elif self.score_function == 'scaled_dot_product':
kt = kx.permute(0, 2, 1)
qkt = torch.bmm(qx, kt)
score = torch.div(qkt, math.sqrt(self.hidden_dim))
elif self.score_function == 'mlp':
kxx = torch.unsqueeze(kx, dim=1).expand(-1, q_len, -1, -1)
qxx = torch.unsqueeze(qx, dim=2).expand(-1, -1, k_len, -1)
kq = torch.cat((kxx, qxx), dim=-1)
score = F.tanh(torch.matmul(kq, self.weight))
elif self.score_function == 'bi_linear':
qw = torch.matmul(qx, self.weight)
kt = kx.permute(0, 2, 1)
score = torch.bmm(qw, kt)
else:
raise RuntimeError('invalid score_function')
score = F.softmax(score, dim=-1)
output = torch.bmm(score, kx)
output = torch.cat(torch.split(output, mb_size, dim=0), dim=-1)
output = self.proj(output)
output = self.dropout(output)
return output, score
def get_inputs():
return [torch.rand([4, 4, 1, 4]), torch.rand([4, 4, 1, 4])]
def get_init_inputs():
return [4]
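def _example_forward():
    # Added illustrative sketch (not from the source repo): exercises the
    # default dot_product branch with the shapes returned by get_inputs().
    model = Model(*get_init_inputs())  # embed_dim=4
    out, score = model(*get_inputs())
    return out.shape, score.shape  # both torch.Size([4, 4, 4])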
|
Conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qf/cqf7gwutkswogpoieukkzrlezczaf4jzo3cnrl7zupsezutoj3ez.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class Conv(nn.Module):
"""
2D convolution.
BatchNorm is applied before ReLU; by default ReLU is enabled and BN is not.
Uses a small (3x3) kernel by default.
"""
def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False,
relu=True):
super(Conv, self).__init__()
self.inp_dim = inp_dim
self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride,
padding=(kernel_size - 1) // 2, bias=True)
self.relu = None
self.bn = None
if relu:
self.relu = nn.ReLU()
if bn:
self.bn = nn.BatchNorm2d(out_dim)
def forward(self, x):
assert x.size()[1] == self.inp_dim, '{} {}'.format(x.size()[1],
self.inp_dim)
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inp_dim': 4, 'out_dim': 4}]
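def _example_conv_bn_relu():
    # Added sketch (not from the source repo): demonstrates the
    # BN-before-ReLU ordering described in the docstring when bn=True.
    block = Conv(inp_dim=4, out_dim=8, kernel_size=3, bn=True, relu=True)
    y = block(torch.rand(2, 4, 16, 16))
    return y.shape  # torch.Size([2, 8, 16, 16]); padding=1 keeps H and W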
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2, buf2
class ConvNew(nn.Module):
"""
2D convolution.
BatchNorm is applied before ReLU; by default ReLU is enabled and BN is not.
Uses a small (3x3) kernel by default.
"""
def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False,
relu=True):
super(ConvNew, self).__init__()
self.inp_dim = inp_dim
self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride,
padding=(kernel_size - 1) // 2, bias=True)
self.relu = None
self.bn = None
if relu:
self.relu = nn.ReLU()
if bn:
self.bn = nn.BatchNorm2d(out_dim)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
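# Hypothetical CUDA smoke test for the compiled wrapper above (added sketch,
# not generated code): call() asserts a contiguous (4, 4, 4, 4) input, so the
# shape below mirrors get_inputs() from the original module.
def _conv_new_smoke_test():
    conv = ConvNew(inp_dim=4, out_dim=4).cuda()
    y = conv(torch.rand(4, 4, 4, 4, device='cuda'))
    return y.shape  # torch.Size([4, 4, 4, 4])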
| rm-rf-me/Study-stacked-hourglass | Conv | false | 7,568 | [
"BSD-3-Clause"
] | 1 | 48441f0dd5ae3397470c70db0f50ab5576b9d2f2 | https://github.com/rm-rf-me/Study-stacked-hourglass/tree/48441f0dd5ae3397470c70db0f50ab5576b9d2f2 | import torch
import torch.utils.data
from torch import nn
class Model(nn.Module):
"""
2D convolution.
BatchNorm is applied before ReLU; by default ReLU is enabled and BN is not.
Uses a small (3x3) kernel by default.
"""
def __init__(self, inp_dim, out_dim, kernel_size=3, stride=1, bn=False,
relu=True):
super().__init__()
self.inp_dim = inp_dim
self.conv = nn.Conv2d(inp_dim, out_dim, kernel_size, stride,
padding=(kernel_size - 1) // 2, bias=True)
self.relu = None
self.bn = None
if relu:
self.relu = nn.ReLU()
if bn:
self.bn = nn.BatchNorm2d(out_dim)
def forward(self, x):
assert x.size()[1] == self.inp_dim, '{} {}'.format(x.size()[1],
self.inp_dim)
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
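def _check_same_padding():
    # Added illustrative check: padding=(kernel_size - 1) // 2 preserves
    # H and W for odd kernels at stride 1, e.g. (5 - 1) // 2 == 2 below.
    m = Model(4, 4, kernel_size=5)
    return m(torch.rand(1, 4, 9, 9)).shape  # torch.Size([1, 4, 9, 9])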
|
LandmarkHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rp/crpinnbyfuoihrintxnd7bxm7xhi4uxtn45i3km2osirfc5ippha.py
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# out_1 => clone
# view => view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 8]), kwargs = {})
triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 393216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (24, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (24, ), (1, ))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 24, 64, 64), (98304, 1, 1536, 24))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 24), (98304, 1536, 24, 1), 0); del buf1 # reuse
buf3 = reinterpret_tensor(buf2, (4, 12288, 8), (98304, 8, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
triton_poi_fused_clone_view_1.run(buf3, primals_2, 393216, grid=grid(393216), stream=stream0)
del primals_2
return (buf3, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((24, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from itertools import product as product
class LandmarkHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 8, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 8)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
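def _example_landmark_head():
    # Added sketch (not from the source repo): with the default
    # inchannels=512 and num_anchors=3, a (N, 512, H, W) feature map becomes
    # (N, H * W * 3, 8) landmark offsets.
    head = LandmarkHead()
    out = head(torch.rand(1, 512, 8, 8))
    return out.shape  # torch.Size([1, 192, 8]) since 8 * 8 * 3 == 192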
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (24, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (24,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 24, 64, 64), (98304, 1, 1536, 24))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 24), (98304, 1536, 24,
1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 8), (98304, 8, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(393216)](buf3, primals_2, 393216,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class LandmarkHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 8, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
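# Hypothetical CUDA smoke test for the compiled head above (added sketch):
# call() asserts a (4, 512, 64, 64) input, matching get_inputs(), and returns
# the (4, 12288, 8) view produced by the fused clone + view kernel.
def _landmark_head_new_smoke_test():
    head = LandmarkHeadNew().cuda()
    out = head(torch.rand(4, 512, 64, 64, device='cuda'))
    return out.shape  # torch.Size([4, 12288, 8]); 64 * 64 * 3 == 12288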
| qw85639229/Car_License_SVM | LandmarkHead | false | 7,569 | [
"MIT"
] | 1 | c5b0062e84e5000c7940b1d90cc7c63e52afed21 | https://github.com/qw85639229/Car_License_SVM/tree/c5b0062e84e5000c7940b1d90cc7c63e52afed21 | import torch
import torch.nn as nn
from itertools import product as product
class Model(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super().__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 8, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 8)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rg/crg522m3y4v7k4jllgwpydciu6bjqsfnsxrer5whyf4hotsoe5rw.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/h7/ch7ziltjnllhlwal6dz2n67p6gl5e2gojxkzuefleah4glcy25od.py
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Q], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [K], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [V], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dots], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf7)
del primals_9
return (reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (16, 4), (4, 1), 0), primals_8, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
class Attention(torch.nn.Module):
def __init__(self, features, attn_dim):
super(Attention, self).__init__()
self.to_q = torch.nn.Linear(features, attn_dim)
self.to_k = torch.nn.Linear(features, attn_dim)
self.to_v = torch.nn.Linear(features, attn_dim)
self.project = torch.nn.Linear(attn_dim, features)
def forward(self, x):
Q = self.to_q(x)
K = self.to_k(x)
V = self.to_v(x)
dots = torch.bmm(Q, K.permute(0, 2, 1))
attn = F.softmax(dots, 0)
out = torch.bmm(attn, V)
out = self.project(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'features': 4, 'attn_dim': 4}]
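def _softmax_axis_example():
    # Added observation (illustrative, not from the source repo):
    # F.softmax(dots, 0) normalizes over the batch dimension rather than the
    # key dimension; the fused kernels above take amax/sum at offsets 0, 16,
    # 32 and 48, i.e. across the 4 batch entries of the (4, 4, 4) scores.
    attn = Attention(features=4, attn_dim=4)
    out = attn(torch.rand(4, 4, 4))
    return out.shape  # torch.Size([4, 4, 4])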
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf2)
del primals_6
del primals_7
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4,
1), 0), out=buf6)
buf7 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf7)
del primals_9
return reinterpret_tensor(buf7, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf6, (16, 4), (4, 1), 0
), primals_8, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
class AttentionNew(torch.nn.Module):
def __init__(self, features, attn_dim):
super(AttentionNew, self).__init__()
self.to_q = torch.nn.Linear(features, attn_dim)
self.to_k = torch.nn.Linear(features, attn_dim)
self.to_v = torch.nn.Linear(features, attn_dim)
self.project = torch.nn.Linear(attn_dim, features)
def forward(self, input_0):
primals_1 = self.to_q.weight
primals_2 = self.to_q.bias
primals_4 = self.to_k.weight
primals_5 = self.to_k.bias
primals_6 = self.to_v.weight
primals_7 = self.to_v.bias
primals_8 = self.project.weight
primals_9 = self.project.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
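# Hypothetical CUDA usage for the compiled wrapper above (added sketch):
# call() asserts a (4, 4, 4) input, matching get_inputs() from the eager
# module, and returns the projected attention output.
def _attention_new_usage():
    attn = AttentionNew(features=4, attn_dim=4).cuda()
    out = attn(torch.rand(4, 4, 4, device='cuda'))
    return out.shape  # torch.Size([4, 4, 4])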
| rish-16/pytorch-graphdl | Attention | false | 7,570 | [
"MIT"
] | 1 | 631da8cbf24e67fab2122c507e1935d4acf26e41 | https://github.com/rish-16/pytorch-graphdl/tree/631da8cbf24e67fab2122c507e1935d4acf26e41 | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, features, attn_dim):
super().__init__()
self.to_q = torch.nn.Linear(features, attn_dim)
self.to_k = torch.nn.Linear(features, attn_dim)
self.to_v = torch.nn.Linear(features, attn_dim)
self.project = torch.nn.Linear(attn_dim, features)
def forward(self, x):
Q = self.to_q(x)
K = self.to_k(x)
V = self.to_v(x)
dots = torch.bmm(Q, K.permute(0, 2, 1))
attn = F.softmax(dots, 0)
out = torch.bmm(attn, V)
out = self.project(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DQN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/om/com5yebxo5qsahe3lhucgobrzm6npeoi425wxqvff6fvddh4edcs.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (512*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ne/cnepmjd66uu3laeexeusfxab3aayptiri2wp2knrgtgmx52tvzxj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ub/cubq6mwncunyjmqflzpohu2yx3nbpr4nqxrc52pzll64qdraayed.py
# Topologically Sorted Source Nodes: [conv2d, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [4, 4], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 2048], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 1035
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (1035*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + (32*x2) + (33120*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yg/cyg7vbt2gdbmfx3lsg5ur2uhn4u2rduakwkwvreokkpnad22mdmv.py
# Topologically Sorted Source Nodes: [conv2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 53760
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ra/crabviiruolsyez6gpqibnseksdal55s6wmpbs2mp3dhmq4ih4cx.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 512
xnumel = 152
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = (yindex // 128)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (128*x2) + (19456*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (152*y3)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + (128*x2) + (19456*y1)), tmp6, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vm/cvmov3inzvpsh4jpqe4q6w2qzcune6prysd6txail3qiwclxodlb.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_6 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
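

# Eager-mode sketch of triton_poi_fused_relu_5 (a hypothetical helper, not
# part of the generated module): it adds the fc1 bias to the (4, 512) matmul
# output and applies ReLU, matching the add.Tensor + relu graph fragment above.
def _bias_relu_reference(mm_out, bias):
    return torch.relu(mm_out + bias)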
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 185, 95), (17575, 17575, 95, 1))
assert_size_stride(primals_2, (32, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (512, 19456), (19456, 1))
assert_size_stride(primals_9, (512, ), (1, ))
assert_size_stride(primals_10, (4, 512), (512, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 2048, 16, grid=grid(2048, 16), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 8192, 9, grid=grid(8192, 9), stream=stream0)
del primals_6
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_1, primals_2, stride=(4, 4), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 45, 23), (33120, 1035, 23, 1))
buf3 = empty_strided_cuda((4, 32, 45, 23), (33120, 1, 736, 32), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf2, primals_3, buf3, 128, 1035, grid=grid(128, 1035), stream=stream0)
del buf2
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 21, 10), (13440, 1, 640, 64))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf5, primals_5, 53760, grid=grid(53760), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, buf1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 19, 8), (19456, 1, 1024, 128))
buf7 = empty_strided_cuda((4, 128, 19, 8), (19456, 152, 8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 19, 8), (19456, 1, 1024, 128), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_4.run(buf6, primals_7, buf7, buf11, 512, 152, grid=grid(512, 152), stream=stream0)
del buf6
del primals_7
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 19456), (19456, 1), 0), reinterpret_tensor(primals_8, (19456, 512), (1, 19456), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf9, primals_9, 2048, grid=grid(2048), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(primals_10, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf10)
del primals_11
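    # Only buf10 (the fc2 output) is the forward result; the remaining entries
    # are saved for autograd: reordered weights, inputs/activations, and the
    # buf11 boolean mask produced for aten.threshold_backward (ReLU gradient).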
return (buf10, primals_2, buf0, buf1, primals_1, buf3, buf5, reinterpret_tensor(buf7, (4, 19456), (19456, 1), 0), buf9, primals_10, primals_8, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 185, 95), (17575, 17575, 95, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 1, 8, 8), (64, 64, 8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((512, 19456), (19456, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class DQN(nn.Module):
"""
Deep Q-Network: Actor (Policy) Model.
(function approximator for the Q-table)
"""
    def __init__(self, state_size, action_size, seed, fc1_unit=64, fc2_unit=64):
"""
Initialize parameters and build model.
Params
=======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_unit (int): Number of neurons in first hidden layer
fc2_unit (int): Number of neurons in second hidden layer
"""
super(DQN, self).__init__()
self.seed = torch.manual_seed(seed)
self.conv1 = nn.Conv2d(1, 32, 8, stride=4, padding=1)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 3)
self.fc1 = nn.Linear(1 * 128 * 19 * 8, 512)
self.fc2 = nn.Linear(512, action_size)
def forward(self, state):
"""
        Map a state to action-values.
        ---
        args:
            state: state tensor (grayscale image)
        returns:
            q_values: array of length `action_size`. It corresponds to the
                action-values for each action given the input state:
                q_values=[Q(state, a_1), Q(state, a_2), ..., Q(state, a_n)]
"""
x = state.clone()
x = x.view(-1, 1, 185, 95)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 128 * 19 * 8)
x = F.relu(self.fc1(x))
return self.fc2(x)
def get_inputs():
return [torch.rand([4, 1, 185, 95])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 1035
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 1035 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (y0 + 32 * x2 + 33120 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 53760
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_ptr0, in_ptr1,
    out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 512
xnumel = 152
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 128
y1 = yindex // 128
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 128 * x2 + 19456 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 152 * y3), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y0 + 128 * x2 + 19456 * y1), tmp6, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 185, 95), (17575, 17575, 95, 1))
assert_size_stride(primals_2, (32, 1, 8, 8), (64, 64, 8, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (512, 19456), (19456, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (4, 512), (512, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 16)](primals_4, buf0, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
        buf1 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        triton_poi_fused_1[grid(8192, 9)](primals_6, buf1, 8192, 9,
            XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf2 = extern_kernels.convolution(primals_1, primals_2, stride=(4,
4), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 45, 23), (33120, 1035, 23, 1))
buf3 = empty_strided_cuda((4, 32, 45, 23), (33120, 1, 736, 32),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(128, 1035)](buf2,
primals_3, buf3, 128, 1035, XBLOCK=32, YBLOCK=32, num_warps=4,
num_stages=1)
del buf2
del primals_3
buf4 = extern_kernels.convolution(buf3, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 21, 10), (13440, 1, 640, 64))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_3[grid(53760)](buf5, primals_5,
53760, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, buf1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 19, 8), (19456, 1, 1024, 128))
buf7 = empty_strided_cuda((4, 128, 19, 8), (19456, 152, 8, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 19, 8), (19456, 1, 1024, 128),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(512, 152)](
buf6, primals_7, buf7, buf11, 512, 152, XBLOCK=32, YBLOCK=32,
num_warps=4, num_stages=1)
del buf6
del primals_7
buf8 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 19456), (19456, 1),
0), reinterpret_tensor(primals_8, (19456, 512), (1, 19456), 0),
out=buf8)
buf9 = buf8
del buf8
        triton_poi_fused_relu_5[grid(2048)](buf9, primals_9, 2048,
            XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(
primals_10, (512, 4), (1, 512), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_2, buf0, buf1, primals_1, buf3, buf5,
reinterpret_tensor(buf7, (4, 19456), (19456, 1), 0), buf9,
primals_10, primals_8, buf11)
class DQNNew(nn.Module):
"""
Deep Q-Network: Actor (Policy) Model.
(function approximator for the Q-table)
"""
    def __init__(self, state_size, action_size, seed, fc1_unit=64, fc2_unit=64):
"""
Initialize parameters and build model.
Params
=======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_unit (int): Number of neurons in first hidden layer
fc2_unit (int): Number of neurons in second hidden layer
"""
super(DQNNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.conv1 = nn.Conv2d(1, 32, 8, stride=4, padding=1)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 3)
self.fc1 = nn.Linear(1 * 128 * 19 * 8, 512)
self.fc2 = nn.Linear(512, action_size)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
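

# Example usage (a sketch; `call` pins CUDA device 0 and asserts the fixed
# input shape (4, 1, 185, 95)):
#
#   model = DQNNew(state_size=4, action_size=4, seed=4).cuda()
#   q_values = model(torch.rand(4, 1, 185, 95, device='cuda'))  # shape (4, 4)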
| qarchli/dqn-on-space-invaders | DQN | false | 7,571 | [
"MIT"
] | 1 | 148f1a7b65b2f47dab736b08cc7d6b7de1725a00 | https://github.com/qarchli/dqn-on-space-invaders/tree/148f1a7b65b2f47dab736b08cc7d6b7de1725a00 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
Deep Q-Network: Actor (Policy) Model.
(function approximator for the Q-table)
"""
    def __init__(self, state_size, action_size, seed, fc1_unit=64, fc2_unit=64):
"""
Initialize parameters and build model.
Params
=======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_unit (int): Number of neurons in first hidden layer
fc2_unit (int): Number of neurons in second hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.conv1 = nn.Conv2d(1, 32, 8, stride=4, padding=1)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 3)
self.fc1 = nn.Linear(1 * 128 * 19 * 8, 512)
self.fc2 = nn.Linear(512, action_size)
def forward(self, state):
"""
        Map a state to action-values.
        ---
        args:
            state: state tensor (grayscale image)
        returns:
            q_values: array of length `action_size`. It corresponds to the
                action-values for each action given the input state:
                q_values=[Q(state, a_1), Q(state, a_2), ..., Q(state, a_n)]
"""
x = state.clone()
x = x.view(-1, 1, 185, 95)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 128 * 19 * 8)
x = F.relu(self.fc1(x))
return self.fc2(x)
def get_inputs():
return [torch.rand([4, 1, 185, 95])]
def get_init_inputs():
return [4, 4, 4]
|
HeatmapLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xf/cxf2mveni2qzuqw5oi7mwzm5vuj4a63unfx2dikmtlxpnubv3xi5.py
# Topologically Sorted Source Nodes: [sub, l, mean, mean_1], Original ATen: [aten.sub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# l => pow_1
# mean => mean
# mean_1 => mean_1
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [3]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [2]), kwargs = {})
triton_poi_fused_mean_pow_sub_0 = async_compile.triton('triton_poi_fused_mean_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 32, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (16*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr1 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr1 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp42 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr1 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr1 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr1 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr1 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp63 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp68 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp72 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp73 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp77 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp78 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp24 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp29 + tmp33
tmp37 = tmp35 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp34 + tmp38
tmp40 = tmp39 / tmp19
tmp41 = tmp20 + tmp40
tmp44 = tmp42 - tmp43
tmp45 = tmp44 * tmp44
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp48
tmp50 = tmp45 + tmp49
tmp53 = tmp51 - tmp52
tmp54 = tmp53 * tmp53
tmp55 = tmp50 + tmp54
tmp58 = tmp56 - tmp57
tmp59 = tmp58 * tmp58
tmp60 = tmp55 + tmp59
tmp61 = tmp60 / tmp19
tmp62 = tmp41 + tmp61
tmp65 = tmp63 - tmp64
tmp66 = tmp65 * tmp65
tmp69 = tmp67 - tmp68
tmp70 = tmp69 * tmp69
tmp71 = tmp66 + tmp70
tmp74 = tmp72 - tmp73
tmp75 = tmp74 * tmp74
tmp76 = tmp71 + tmp75
tmp79 = tmp77 - tmp78
tmp80 = tmp79 * tmp79
tmp81 = tmp76 + tmp80
tmp82 = tmp81 / tmp19
tmp83 = tmp62 + tmp82
tmp84 = tmp83 / tmp19
tl.store(out_ptr0 + (x0), tmp84, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/35/c35v4ojil7vwot3cvbaaxwst2htsi5dh2y4o4d7fr4ma3zmlulqt.py
# Topologically Sorted Source Nodes: [l_1], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# l_1 => mean_2
# Graph fragment:
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean_1, [1]), kwargs = {})
triton_poi_fused_mean_1 = async_compile.triton('triton_poi_fused_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
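

# Taken together, the two kernels above reproduce the eager computation of
# HeatmapLoss -- a sketch (the first kernel folds the dim=3 and dim=2 means
# into a single pass, the second averages over dim=1):
def _heatmap_loss_reference(pred, gt):
    return ((pred - gt) ** 2).mean(dim=3).mean(dim=2).mean(dim=1)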
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, l, mean, mean_1], Original ATen: [aten.sub, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_pow_sub_0.run(arg0_1, arg1_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [l_1], Original ATen: [aten.mean]
triton_poi_fused_mean_1.run(buf0, buf1, 4, grid=grid(4), stream=stream0)
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class HeatmapLoss(torch.nn.Module):
"""
    Loss for a detection heatmap.
"""
def __init__(self):
super(HeatmapLoss, self).__init__()
def forward(self, pred, gt):
l = (pred - gt) ** 2
l = l.mean(dim=3).mean(dim=2).mean(dim=1)
return l
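

# Example usage (a sketch): one scalar loss per sample, averaged over the
# channel and spatial dimensions.
#
#   loss_fn = HeatmapLoss()
#   l = loss_fn(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))  # shape (4,)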
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp22 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp31 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp43 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp46 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp47 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp51 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp52 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp63 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp64 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp67 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp68 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp72 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp73 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp77 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp78 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = 4.0
tmp20 = tmp18 / tmp19
tmp23 = tmp21 - tmp22
tmp24 = tmp23 * tmp23
tmp27 = tmp25 - tmp26
tmp28 = tmp27 * tmp27
tmp29 = tmp24 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp34 = tmp29 + tmp33
tmp37 = tmp35 - tmp36
tmp38 = tmp37 * tmp37
tmp39 = tmp34 + tmp38
tmp40 = tmp39 / tmp19
tmp41 = tmp20 + tmp40
tmp44 = tmp42 - tmp43
tmp45 = tmp44 * tmp44
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp48
tmp50 = tmp45 + tmp49
tmp53 = tmp51 - tmp52
tmp54 = tmp53 * tmp53
tmp55 = tmp50 + tmp54
tmp58 = tmp56 - tmp57
tmp59 = tmp58 * tmp58
tmp60 = tmp55 + tmp59
tmp61 = tmp60 / tmp19
tmp62 = tmp41 + tmp61
tmp65 = tmp63 - tmp64
tmp66 = tmp65 * tmp65
tmp69 = tmp67 - tmp68
tmp70 = tmp69 * tmp69
tmp71 = tmp66 + tmp70
tmp74 = tmp72 - tmp73
tmp75 = tmp74 * tmp74
tmp76 = tmp71 + tmp75
tmp79 = tmp77 - tmp78
tmp80 = tmp79 * tmp79
tmp81 = tmp76 + tmp80
tmp82 = tmp81 / tmp19
tmp83 = tmp62 + tmp82
tmp84 = tmp83 / tmp19
tl.store(out_ptr0 + x0, tmp84, xmask)
@triton.jit
def triton_poi_fused_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_pow_sub_0[grid(16)](arg0_1, arg1_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        triton_poi_fused_mean_1[grid(4)](buf0, buf1, 4, XBLOCK=4,
            num_warps=1, num_stages=1)
del buf0
return buf1,
class HeatmapLossNew(torch.nn.Module):
"""
    Loss for a detection heatmap.
"""
def __init__(self):
super(HeatmapLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| rm-rf-me/Study-stacked-hourglass | HeatmapLoss | false | 7,572 | [
"BSD-3-Clause"
] | 1 | 48441f0dd5ae3397470c70db0f50ab5576b9d2f2 | https://github.com/rm-rf-me/Study-stacked-hourglass/tree/48441f0dd5ae3397470c70db0f50ab5576b9d2f2 | import torch
import torch.utils.data
class Model(torch.nn.Module):
"""
    Loss for a detection heatmap.
"""
def __init__(self):
super().__init__()
def forward(self, pred, gt):
l = (pred - gt) ** 2
l = l.mean(dim=3).mean(dim=2).mean(dim=1)
return l
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MSECompositionLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/bv/cbvrs5xmjnaa5kevipbtguhjfkbeqz3fqzbe2wyky7pkkyvy5djt.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.mse_loss, aten.mean]
# Source node to ATen node mapping:
# loss => pow_1, sub_1
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pred_merged => add
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_mean_mse_loss_mul_rsub_0 = async_compile.triton('triton_per_fused_add_mean_mse_loss_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mse_loss_mul_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mse_loss_mul_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
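

# Eager sketch of the fused computation above (hypothetical helper): alpha
# composition of foreground and background, followed by an unweighted 'mean'
# MSE scaled by the default loss_weight of 1.0.
def _mse_composition_reference(pred_alpha, fg, bg, ori_merged):
    pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    return ((pred_merged - ori_merged) ** 2).mean() * 1.0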
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.mse_loss, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mse_loss_mul_rsub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per-sample with 'mean', and then take the mean over all
            samples. Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
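

# A tiny worked example of the branches above (a sketch): for a loss of shape
# (2, 1, 2, 2) with a weight of all ones, weight.sum() per sample is 4, so the
# sample_wise branch computes (loss / 4).sum() / 2 (per-sample mean, then batch
# mean), while the default branch computes loss.sum() / 8 (global weighted
# mean). The two agree here only because the weights are uniform.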
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
            sample_wise=False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
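

# Because of the decorator, mse_loss also accepts weight / reduction /
# sample_wise keyword arguments, mirroring the masked_loss docstring -- e.g.
# (a sketch):
#
#   mse_loss(torch.zeros(1, 1, 2, 2), torch.ones(1, 1, 2, 2))  # tensor(1.)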
class MSECompositionLoss(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per-sample with 'mean', and then take the mean over all
            samples. Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super(MSECompositionLoss, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
ori_merged (Tensor): of shape (N, 3, H, W). Tensor of origin merged
image before normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * mse_loss(pred_merged, ori_merged, weight,
reduction=self.reduction, sample_wise=self.sample_wise)
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import functools
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mse_loss_mul_rsub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mse_loss_mul_rsub_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per-sample with 'mean', and then take the mean over all
            samples. Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean',
            sample_wise=False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
class MSECompositionLossNew(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per-sample with 'mean', and then take the mean over all
            samples. Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super(MSECompositionLossNew, self).__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
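

# Example usage (a sketch; `call` requires CUDA and asserts the fixed
# (4, 4, 4, 4) shapes):
#
#   loss_fn = MSECompositionLossNew()
#   alpha, fg, bg, merged = (torch.rand(4, 4, 4, 4, device='cuda')
#                            for _ in range(4))
#   scalar_loss = loss_fn(alpha, fg, bg, merged)  # 0-dim tensor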
| rivergold/mmediting | MSECompositionLoss | false | 7,573 | [
"Apache-2.0"
] | 1 | fd972635c48bb065db29d1b5090592a87c7263d2 | https://github.com/rivergold/mmediting/tree/fd972635c48bb065db29d1b5090592a87c7263d2 | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
else:
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per-sample with 'mean', and then take the mean over all
            samples. Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
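# Hedged illustration (editor addition, not from the original repo): a small
# self-contained check of how `sample_wise` changes the weighted 'mean'
# reduction in mask_reduce_loss above. All tensor values are assumptions
# chosen only to make the two modes differ.
def _demo_mask_reduce_loss():
    loss = torch.ones(2, 1, 2, 2)
    loss[1, 0, 0, 0] = 3.0
    weight = torch.zeros(2, 1, 2, 2)
    weight[0] = 1.0              # sample 0: all four pixels valid
    weight[1, 0, 0, 0] = 1.0     # sample 1: a single valid pixel
    # total weighted loss / total weight: (4 * 1 + 3) / (4 + 1) = 1.4
    global_mean = mask_reduce_loss(loss, weight, 'mean', sample_wise=False)
    # per-sample means first, then batch mean: (4/4 + 3/1) / 2 = 2.0
    per_sample = mask_reduce_loss(loss, weight, 'mean', sample_wise=True)
    return global_mean, per_sample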
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
    ... def l1_loss(pred, target):
    ...     return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
class Model(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argum
# ... truncated (>4000 chars) for memory efficiency |
Entmax15 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/uc/cuct53d7tbv4oqgsdpn3pyhu4ztoebl3dm3bhhovn4qqnrghpmsp.py
# Topologically Sorted Source Nodes: [max_1, X, X_1, sort, pow_1, cumsum_1, cumsum], Original ATen: [aten.max, aten.sub, aten.div, aten.sort, aten.pow, aten.cumsum]
# Source node to ATen node mapping:
# X => sub
# X_1 => div
# cumsum => cumsum
# cumsum_1 => cumsum_1
# max_1 => max_1
# pow_1 => pow_1
# sort => sort
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %getitem), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, 2), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%div, -1, True), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%getitem_2, 2), kwargs = {})
# %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%pow_1, -1), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%getitem_2, -1), kwargs = {})
triton_per_fused_cumsum_div_max_pow_sort_sub_0 = async_compile.triton('triton_per_fused_cumsum_div_max_pow_sort_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_div_max_pow_sort_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_div_max_pow_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = r1
tmp12 = tmp11.to(tl.int16)
tmp13 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15, tmp16, = triton_helpers.sort_with_index(tmp13, tmp14, None, 1, stable=False, descending=True)
tmp17 = tmp15 * tmp15
tmp18 = tmp17.to(tl.float32)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp20, = tl.associative_scan((tmp19,), 1, _triton_helper_fn_add0)
tmp21 = tmp15.to(tl.float32)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp23, = tl.associative_scan((tmp22,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp10, xmask)
tl.store(out_ptr1 + (r1 + (4*x0)), tmp15, xmask)
tl.store(out_ptr2 + (r1 + (4*x0)), tmp20, xmask)
tl.store(out_ptr3 + (r1 + (4*x0)), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fc/cfc35vcmsbfc27vwyzjeuvwv7proixzoady3x4dn4wkwb46on5ba.py
# Topologically Sorted Source Nodes: [mean_sq, mean, pow_2, sub_1, ss, sub_2, delta, delta_nz, sqrt, tau, le, sum_1], Original ATen: [aten.div, aten.pow, aten.sub, aten.mul, aten.rsub, aten.clamp, aten.sqrt, aten.le, aten.sum]
# Source node to ATen node mapping:
# delta => div_3
# delta_nz => clamp_min
# le => le
# mean => div_1
# mean_sq => div_2
# pow_2 => pow_2
# sqrt => sqrt
# ss => mul_1
# sub_1 => sub_1
# sub_2 => sub_2
# sum_1 => sum_1
# tau => sub_3
# Graph fragment:
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum_1, %permute), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum, %permute), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div_1, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_2, %pow_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_1), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %permute), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div_3, 0), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %sqrt), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Tensor](args = (%sub_3, %getitem_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%le, [-1]), kwargs = {})
triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1 = async_compile.triton('triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i64', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp54 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp5 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = tmp1 - tmp7
tmp9 = tmp8 / tmp1
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp2 - tmp12
tmp15 = tmp13 <= tmp14
tmp16 = tmp15.to(tl.int64)
tmp18 = 2.0
tmp19 = tmp17 / tmp18
tmp21 = tmp20 / tmp18
tmp22 = tmp19 * tmp19
tmp23 = tmp21 - tmp22
tmp24 = tmp18 * tmp23
tmp25 = tmp1 - tmp24
tmp26 = tmp25 / tmp18
tmp27 = triton_helpers.maximum(tmp26, tmp10)
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp19 - tmp28
tmp31 = tmp29 <= tmp30
tmp32 = tmp31.to(tl.int64)
tmp33 = tmp16 + tmp32
tmp35 = 3.0
tmp36 = tmp34 / tmp35
tmp38 = tmp37 / tmp35
tmp39 = tmp36 * tmp36
tmp40 = tmp38 - tmp39
tmp41 = tmp35 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp42 / tmp35
tmp44 = triton_helpers.maximum(tmp43, tmp10)
tmp45 = libdevice.sqrt(tmp44)
tmp46 = tmp36 - tmp45
tmp48 = tmp46 <= tmp47
tmp49 = tmp48.to(tl.int64)
tmp50 = tmp33 + tmp49
tmp52 = 4.0
tmp53 = tmp51 / tmp52
tmp55 = tmp54 / tmp52
tmp56 = tmp53 * tmp53
tmp57 = tmp55 - tmp56
tmp58 = tmp52 * tmp57
tmp59 = tmp1 - tmp58
tmp60 = tmp59 / tmp52
tmp61 = triton_helpers.maximum(tmp60, tmp10)
tmp62 = libdevice.sqrt(tmp61)
tmp63 = tmp53 - tmp62
tmp65 = tmp63 <= tmp64
tmp66 = tmp65.to(tl.int64)
tmp67 = tmp50 + tmp66
tl.store(out_ptr0 + (x0), tmp67, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ew/cew26k3sdtsxsjc6buf3rp6skduxkoiohho322xjrv4c2dhheduy.py
# Topologically Sorted Source Nodes: [mean_sq, mean, pow_2, sub_1, ss, sub_2, delta, delta_nz, sqrt, tau, sub_4, tau_star, sub_5, clamp_1, Y], Original ATen: [aten.div, aten.pow, aten.sub, aten.mul, aten.rsub, aten.clamp, aten.sqrt, aten.gather]
# Source node to ATen node mapping:
# Y => pow_3
# clamp_1 => clamp_min_1
# delta => div_3
# delta_nz => clamp_min
# mean => div_1
# mean_sq => div_2
# pow_2 => pow_2
# sqrt => sqrt
# ss => mul_1
# sub_1 => sub_1
# sub_2 => sub_2
# sub_4 => sub_4
# sub_5 => sub_5
# tau => sub_3
# tau_star => gather
# Graph fragment:
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum_1, %permute), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%cumsum, %permute), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%div_1, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_2, %pow_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %mul_1), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %permute), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div_3, 0), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %sub_3 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div_1, %sqrt), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, 1), kwargs = {})
# %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%sub_3, -1, %sub_4), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %gather), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min_1, 2), kwargs = {})
triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2 = async_compile.triton('triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp1 - tmp2
tmp4 = tl.full([XBLOCK], 4, tl.int32)
tmp5 = tmp3 + tmp4
tmp6 = tmp3 < 0
tmp7 = tl.where(tmp6, tmp5, tmp3)
tl.device_assert(((0 <= tmp7) & (tmp7 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp7 < 4")
tmp9 = tl.load(in_ptr2 + (tmp7 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = 1 + tmp7
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tl.load(in_ptr3 + (tmp7 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tmp13 / tmp11
tmp15 = tmp12 * tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp11 * tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tmp20 = tmp19 / tmp11
tmp21 = 0.0
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp12 - tmp23
tmp25 = tmp0 - tmp24
tmp26 = triton_helpers.maximum(tmp25, tmp21)
tmp27 = tmp26 * tmp26
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, X, X_1, sort, pow_1, cumsum_1, cumsum], Original ATen: [aten.max, aten.sub, aten.div, aten.sort, aten.pow, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused_cumsum_div_max_pow_sort_sub_0.run(arg0_1, buf0, buf1, buf3, buf4, 64, 4, grid=grid(64), stream=stream0)
del arg0_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [mean_sq, mean, pow_2, sub_1, ss, sub_2, delta, delta_nz, sqrt, tau, le, sum_1], Original ATen: [aten.div, aten.pow, aten.sub, aten.mul, aten.rsub, aten.clamp, aten.sqrt, aten.le, aten.sum]
triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1.run(buf4, buf3, buf1, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mean_sq, mean, pow_2, sub_1, ss, sub_2, delta, delta_nz, sqrt, tau, sub_4, tau_star, sub_5, clamp_1, Y], Original ATen: [aten.div, aten.pow, aten.sub, aten.mul, aten.rsub, aten.clamp, aten.sqrt, aten.gather]
triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2.run(buf0, buf5, buf4, buf3, buf6, 256, grid=grid(256), stream=stream0)
del buf0
del buf3
del buf4
del buf5
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
    elif dim < 0:
        # normalize the negative axis to a positive index (e.g. -2 on a
        # 4-d tensor -> 2); `X.dim() - dim` would index out of range
        dim = X.dim() + dim
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _entmax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for 1.5-entmax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
    support_size : torch.LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]:
Xsrt, _ = torch.sort(X, dim=dim, descending=True)
else:
Xsrt, _ = torch.topk(X, k=k, dim=dim)
rho = _make_ix_like(Xsrt, dim)
mean = Xsrt.cumsum(dim) / rho
mean_sq = (Xsrt ** 2).cumsum(dim) / rho
ss = rho * (mean_sq - mean ** 2)
delta = (1 - ss) / rho
delta_nz = torch.clamp(delta, 0)
tau = mean - torch.sqrt(delta_nz)
support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim)
tau_star = tau.gather(dim, support_size - 1)
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
X_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k)
_roll_last(tau_star, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau_star, support_size
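# Hedged sanity-check sketch (editor addition, not from the original repo):
# the threshold computed above satisfies sum(clamp(X - tau, 0) ** 2) == 1
# along `dim` once X has been max-shifted and halved as in
# Entmax15Function.forward below. The random shape is an assumption.
def _check_entmax15_threshold(dim=-1):
    X = torch.randn(3, 5)
    X = (X - X.max(dim=dim, keepdim=True).values) / 2
    tau, support = _entmax_threshold_and_support(X, dim=dim)
    p = torch.clamp(X - tau, min=0) ** 2
    assert torch.allclose(p.sum(dim), torch.ones(3), atol=1e-5)
    return p, support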
def entmax15(X, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return Entmax15Function.apply(X, dim, k)
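# Hedged usage sketch (editor addition): unlike softmax, entmax15 returns a
# valid distribution that can contain exact zeros. The logits below are an
# assumption and the outputs are approximate.
#
#   x = torch.tensor([[2.0, 1.0, -2.0, -3.0]])
#   entmax15(x)               # ~ tensor([[0.831, 0.169, 0.000, 0.000]])
#   torch.softmax(x, dim=-1)  # strictly positive in every coordinate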
class Entmax15Function(Function):
@classmethod
def forward(cls, ctx, X, dim=0, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val
X = X / 2
tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k)
Y = torch.clamp(X - tau_star, min=0) ** 2
ctx.save_for_backward(Y)
return Y
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y.sqrt()
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None
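# Hedged gradient-check sketch (editor addition): the backward above applies
# the 1.5-entmax Jacobian-vector product, dX = sqrt(Y) * (dY - q).
# torch.autograd.gradcheck in double precision is a standard way to verify a
# hand-written backward; it may fail at inputs where the support set changes,
# and the random input below is an assumption.
#
#   X = torch.randn(2, 5, dtype=torch.double, requires_grad=True)
#   torch.autograd.gradcheck(lambda t: Entmax15Function.apply(t, -1, None), X)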
class Entmax15(nn.Module):
def __init__(self, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
self.dim = dim
self.k = k
super(Entmax15, self).__init__()
def forward(self, X):
return entmax15(X, dim=self.dim, k=self.k)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_div_max_pow_sort_sub_0(in_ptr0, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 0.5
tmp10 = tmp8 * tmp9
tmp11 = r1
tmp12 = tmp11.to(tl.int16)
tmp13 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp14 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15, _tmp16 = triton_helpers.sort_with_index(tmp13, tmp14, None, 1,
stable=False, descending=True)
tmp17 = tmp15 * tmp15
tmp18 = tmp17.to(tl.float32)
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp20, = tl.associative_scan((tmp19,), 1, _triton_helper_fn_add0)
tmp21 = tmp15.to(tl.float32)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp23, = tl.associative_scan((tmp22,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp10, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp15, xmask)
tl.store(out_ptr2 + (r1 + 4 * x0), tmp20, xmask)
tl.store(out_ptr3 + (r1 + 4 * x0), tmp23, xmask)
@triton.jit
def triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp34 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp47 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp51 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp54 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp64 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp5 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = tmp1 - tmp7
tmp9 = tmp8 / tmp1
tmp10 = 0.0
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp2 - tmp12
tmp15 = tmp13 <= tmp14
tmp16 = tmp15.to(tl.int64)
tmp18 = 2.0
tmp19 = tmp17 / tmp18
tmp21 = tmp20 / tmp18
tmp22 = tmp19 * tmp19
tmp23 = tmp21 - tmp22
tmp24 = tmp18 * tmp23
tmp25 = tmp1 - tmp24
tmp26 = tmp25 / tmp18
tmp27 = triton_helpers.maximum(tmp26, tmp10)
tmp28 = libdevice.sqrt(tmp27)
tmp29 = tmp19 - tmp28
tmp31 = tmp29 <= tmp30
tmp32 = tmp31.to(tl.int64)
tmp33 = tmp16 + tmp32
tmp35 = 3.0
tmp36 = tmp34 / tmp35
tmp38 = tmp37 / tmp35
tmp39 = tmp36 * tmp36
tmp40 = tmp38 - tmp39
tmp41 = tmp35 * tmp40
tmp42 = tmp1 - tmp41
tmp43 = tmp42 / tmp35
tmp44 = triton_helpers.maximum(tmp43, tmp10)
tmp45 = libdevice.sqrt(tmp44)
tmp46 = tmp36 - tmp45
tmp48 = tmp46 <= tmp47
tmp49 = tmp48.to(tl.int64)
tmp50 = tmp33 + tmp49
tmp52 = 4.0
tmp53 = tmp51 / tmp52
tmp55 = tmp54 / tmp52
tmp56 = tmp53 * tmp53
tmp57 = tmp55 - tmp56
tmp58 = tmp52 * tmp57
tmp59 = tmp1 - tmp58
tmp60 = tmp59 / tmp52
tmp61 = triton_helpers.maximum(tmp60, tmp10)
tmp62 = libdevice.sqrt(tmp61)
tmp63 = tmp53 - tmp62
tmp65 = tmp63 <= tmp64
tmp66 = tmp65.to(tl.int64)
tmp67 = tmp50 + tmp66
tl.store(out_ptr0 + x0, tmp67, xmask)
@triton.jit
def triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp1 - tmp2
tmp4 = tl.full([XBLOCK], 4, tl.int32)
tmp5 = tmp3 + tmp4
tmp6 = tmp3 < 0
tmp7 = tl.where(tmp6, tmp5, tmp3)
tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~xmask,
'index out of bounds: 0 <= tmp7 < 4')
tmp9 = tl.load(in_ptr2 + (tmp7 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp10 = 1 + tmp7
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tl.load(in_ptr3 + (tmp7 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp14 = tmp13 / tmp11
tmp15 = tmp12 * tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp11 * tmp16
tmp18 = 1.0
tmp19 = tmp18 - tmp17
tmp20 = tmp19 / tmp11
tmp21 = 0.0
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tmp12 - tmp23
tmp25 = tmp0 - tmp24
tmp26 = triton_helpers.maximum(tmp25, tmp21)
tmp27 = tmp26 * tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_div_max_pow_sort_sub_0[grid(64)](arg0_1,
buf0, buf1, buf3, buf4, 64, 4, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_clamp_div_le_mul_pow_rsub_sqrt_sub_sum_1[grid(64)](
buf4, buf3, buf1, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf6 = buf1
del buf1
triton_poi_fused_clamp_div_gather_mul_pow_rsub_sqrt_sub_2[grid(256)](
buf0, buf5, buf4, buf3, buf6, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf3
del buf4
del buf5
return buf6,
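# Hedged consistency-check sketch (editor addition): call() above was
# specialized for a contiguous (4, 4, 4, 4) float32 CUDA tensor and should
# agree with the eager entmax15 defined below over the last dim. Requires a
# CUDA device; the tolerances are assumptions.
#
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   ref = entmax15(x, dim=-1)
#   out, = call([x])
#   torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-5)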
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
    elif dim < 0:
        # normalize the negative axis to a positive index (e.g. -2 on a
        # 4-d tensor -> 2); `X.dim() - dim` would index out of range
        dim = X.dim() + dim
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _entmax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for 1.5-entmax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
    support_size : torch.LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]:
Xsrt, _ = torch.sort(X, dim=dim, descending=True)
else:
Xsrt, _ = torch.topk(X, k=k, dim=dim)
rho = _make_ix_like(Xsrt, dim)
mean = Xsrt.cumsum(dim) / rho
mean_sq = (Xsrt ** 2).cumsum(dim) / rho
ss = rho * (mean_sq - mean ** 2)
delta = (1 - ss) / rho
delta_nz = torch.clamp(delta, 0)
tau = mean - torch.sqrt(delta_nz)
support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim)
tau_star = tau.gather(dim, support_size - 1)
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
X_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k)
_roll_last(tau_star, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau_star, support_size
def entmax15(X, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return Entmax15Function.apply(X, dim, k)
class Entmax15Function(Function):
@classmethod
def forward(cls, ctx, X, dim=0, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val
X = X / 2
tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k)
Y = torch.clamp(X - tau_star, min=0) ** 2
ctx.save_for_backward(Y)
return Y
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y.sqrt()
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None
class Entmax15New(nn.Module):
def __init__(self, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
self.dim = dim
self.k = k
super(Entmax15New, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| roholazandie/entmax | Entmax15 | false | 7,574 | [
"MIT"
] | 1 | 657374e6a792ec6840b6f78bc759cc1f51570aad | https://github.com/roholazandie/entmax/tree/657374e6a792ec6840b6f78bc759cc1f51570aad | from torch.autograd import Function
import torch
import torch.nn as nn
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
    elif dim < 0:
        # normalize the negative axis to a positive index (e.g. -2 on a
        # 4-d tensor -> 2); `X.dim() - dim` would index out of range
        dim = X.dim() + dim
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _entmax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for 1.5-entmax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
    support_size : torch.LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]:
Xsrt, _ = torch.sort(X, dim=dim, descending=True)
else:
Xsrt, _ = torch.topk(X, k=k, dim=dim)
rho = _make_ix_like(Xsrt, dim)
mean = Xsrt.cumsum(dim) / rho
mean_sq = (Xsrt ** 2).cumsum(dim) / rho
ss = rho * (mean_sq - mean ** 2)
delta = (1 - ss) / rho
delta_nz = torch.clamp(delta, 0)
tau = mean - torch.sqrt(delta_nz)
support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim)
tau_star = tau.gather(dim, support_size - 1)
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
X_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _entmax_threshold_and_support(X_, dim=-1, k=2 * k)
_roll_last(tau_star, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau_star, support_size
def entmax15(X, dim=-1, k=None):
"""1.5-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_1.5(p) s.t. p >= 0, sum(p) == 1.
where H_1.5(p) is the Tsallis alpha-entropy with alpha=1.5.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply 1.5-entmax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return Entmax15Function.apply(X, dim, k)
class Entmax15Function(Function):
@classmethod
def forward(cls, ctx, X, dim=0, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val
X = X / 2
tau_star, _ = _entmax_threshold_and_support(X, dim=dim, k=k)
Y = torch.clamp(X - tau_star, min=0) ** 2
ctx.save_for_backward(Y)
return Y
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y.sqrt()
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None
class Model(nn.Module):
def __init__(self, dim=-1, k=None):
"""1.5-entma
# ... truncated (>4000 chars) for memory efficiency |
TransformerLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3l/c3lu4ccbjruychszpewk67ythz75gaj4rslgmbux6fatrywe7g7t.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
# Source node to ATen node mapping:
# multi_head_attention_forward => add_2
# Graph fragment:
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %getitem_5), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ol/colbiyeeegfdyyzeckjnylgg3xt3rkh3aadcz7fjtfx5472nedsg.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_6, 1.0), kwargs = {})
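# (editor note, an inference rather than generated output: the 1.0 factor is
# presumably the attention scaling 1/sqrt(head_dim); the shapes suggest
# embed_dim=4 with 4 heads, i.e. head_dim=1, making the factor exactly 1.0.)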
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c5/cc5cm2utkmzhcjdhw5qgs7t254ixwfil74kthoebdprvhlljdmul.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
# Source node to ATen node mapping:
# multi_head_attention_forward => add_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %getitem_4), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
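# Hedged reference sketch (editor addition): the two softmax kernels above
# split a numerically stable softmax over a last dim of size 4 into an
# exp(x - rowmax) pass and an exp / rowsum pass. An eager equivalent, with
# an assumed (16, 4) score shape, would be:
#
#   shifted = scores - scores.max(dim=-1, keepdim=True).values  # kernel _3
#   e = shifted.exp()
#   attn = e / e.sum(dim=-1, keepdim=True)                      # kernel _4
#   # attn matches torch.softmax(scores, dim=-1)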
# kernel path: runs/run_shard_4/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_10,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
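# triton_poi_fused_clone_5 materializes the permuted attention output as a
# contiguous 4x4 tile: element (y0, x1) of the input is stored at (x1, y0).
# A hedged eager-mode sketch; the helper is illustrative only:
def _clone_transpose_sketch(t):
    # same effect as permute followed by .contiguous() for a 2-D tile
    return t.transpose(0, 1).contiguous()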
# kernel path: runs/run_shard_4/inductor_cache/vr/cvr4r3wywm7v22nxp3lz5bbts2qzlkixo4xmtukbeu5a7skrqu3x.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_3
# src_1 => var_mean
# Graph fragment:
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_12), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
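# triton_poi_fused_add_native_layer_norm_6 fuses the residual add (src plus the
# attention output, which it reads through its permuted layout) with the
# reduction half of LayerNorm: per-row mean and biased variance (correction=0)
# over the 4 features. A hedged eager-mode sketch; the helper is illustrative
# only:
def _layernorm_stats_sketch(src, attn_out):
    x = src + attn_out
    return x.mean(dim=1, keepdim=True), x.var(dim=1, unbiased=False, keepdim=True)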
# kernel path: runs/run_shard_4/inductor_cache/al/calf6gx6upgj6o5xhdzbm7rmjgmfcpny5xyiqq3k674n4jmhzu6h.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_3
# src_1 => add_4, add_5, mul_1, mul_2, rsqrt, sub_1
# Graph fragment:
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_12), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_7), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_6), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_7), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*y0)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0 + (4*x1)), xmask & ymask)
tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x1 + (4*y0)), tmp13, xmask & ymask)
''', device_str='cuda')
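# triton_poi_fused_add_native_layer_norm_7 recomputes the residual sum and
# applies the elementwise half of LayerNorm with the statistics from the
# previous kernel: y = (x - mean) * rsqrt(var + 1e-05) * weight + bias.
# A hedged eager-mode sketch; the helper is illustrative only:
def _layernorm_apply_sketch(x, mean, var, weight, bias, eps=1e-05):
    return (x - mean) * torch.rsqrt(var + eps) * weight + bias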
# kernel path: runs/run_shard_4/inductor_cache/6x/c6xqpjqns57feeddzu3xqlak3x7fjcdw4jtol5idz2zbyoz73qfj.py
# Topologically Sorted Source Nodes: [celu], Original ATen: [aten.celu]
# Source node to ATen node mapping:
# celu => expm1, gt, where
# Graph fragment:
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%addmm_1,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_1, 0), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %addmm_1, %expm1), kwargs = {})
triton_poi_fused_celu_8 = async_compile.triton('triton_poi_fused_celu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_celu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_celu_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = libdevice.expm1(tmp0)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
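# triton_poi_fused_celu_8 is CELU with alpha=1 (identical to ELU at that
# alpha): x where x > 0, else expm1(x). A hedged eager-mode sketch; the helper
# is illustrative only:
def _celu_reference_sketch(x):
    return torch.where(x > 0, x, torch.expm1(x))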
# kernel path: runs/run_shard_4/inductor_cache/iv/civbliykhldc5kiuu4ijxc5ypigx2yuadcujnn6edprsymzdi544.py
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_2 => add_6
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_tensor), kwargs = {})
triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
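# triton_poi_fused_add_9 fuses the linear2 bias add with the second residual
# connection, overwriting the matmul result in place: out = src + (mm + bias).
# A hedged eager-mode sketch; the helper is illustrative only:
def _residual_bias_sketch(src, mm_result, bias):
    return src + (mm_result + bias)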
# kernel path: runs/run_shard_4/inductor_cache/3k/c3kx45p4sb6jfhm7rckz7y6zoifac7jqyxgq66unmjojul7i45qh.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_3 => add_7, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_6, [1]), kwargs = {correction: 0, keepdim: True})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {})
triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
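# triton_poi_fused_native_layer_norm_10 repeats the mean/variance reduction for
# the second LayerNorm, but unlike kernel 6 it also folds in the epsilon add
# and rsqrt, storing rsqrt(var + 1e-05) directly. A hedged eager-mode sketch;
# the helper is illustrative only:
def _layernorm_stats_rsqrt_sketch(x, eps=1e-05):
    mean = x.mean(dim=1, keepdim=True)
    var = x.var(dim=1, unbiased=False, keepdim=True)
    return mean, torch.rsqrt(var + eps)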
# kernel path: runs/run_shard_4/inductor_cache/g3/cg3az2ob7cxf2o2trqd3jahd42kcorve44arenxwiiofyuqjwg6w.py
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_3 => add_7, add_8, mul_3, mul_4, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_6, [1]), kwargs = {correction: 0, keepdim: True})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_12), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_13), kwargs = {})
triton_poi_fused_native_layer_norm_11 = async_compile.triton('triton_poi_fused_native_layer_norm_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
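# triton_poi_fused_native_layer_norm_11 is the matching elementwise pass: with
# mean and rsqrt already materialized, it computes (x - mean) * rsqrt_val *
# weight + bias without re-deriving the statistics.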
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (16, 4), (4, 1))
assert_size_stride(primals_9, (16, ), (1, ))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
buf4 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf4, primals_3, 16, grid=grid(16), stream=stream0)
buf5 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.add]
triton_poi_fused_add_2.run(buf5, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (4, 1, 4), (1, 0, 4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf6, buf7, 64, grid=grid(64), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf9, buf10, 4, 4, grid=grid(4, 4), stream=stream0)
buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_5
buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf11, buf12, buf13, 4, grid=grid(4), stream=stream0)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf11, buf12, buf13, primals_6, primals_7, buf14, 4, 4, grid=grid(4, 4), stream=stream0)
del primals_7
buf15 = reinterpret_tensor(buf7, (4, 16), (16, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf14, reinterpret_tensor(primals_8, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_9
buf16 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [celu], Original ATen: [aten.celu]
triton_poi_fused_celu_8.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf16, reinterpret_tensor(primals_10, (16, 4), (1, 16), 0), out=buf17)
buf18 = buf17; del buf17 # reuse
# Topologically Sorted Source Nodes: [src_2], Original ATen: [aten.add]
triton_poi_fused_add_9.run(buf18, buf14, primals_11, 16, grid=grid(16), stream=stream0)
del primals_11
buf19 = buf13; del buf13 # reuse
buf20 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf18, buf19, buf20, 4, grid=grid(4), stream=stream0)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_3], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_11.run(buf18, buf19, buf20, primals_12, primals_13, buf21, 16, grid=grid(16), stream=stream0)
del buf19
del buf20
del primals_13
return (buf21, primals_1, primals_6, primals_12, buf8, reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf11, buf14, buf15, buf16, buf18, primals_10, primals_8, primals_4, reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), )
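# End to end, call() is one encoder-layer forward pass: fused QKV projection
# (buf0-buf5), scaled dot-product attention with the two-pass softmax
# (buf6-buf9), output projection (buf11), residual add + LayerNorm (buf14),
# the linear1 -> CELU -> linear2 feedforward (buf15-buf18), and a final
# residual add + LayerNorm (buf21). The extra tensors in the return tuple are
# saved for the backward pass.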
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from typing import Optional
class TransformerLayer(nn.Module):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward_multiplier: multiplier applied to d_model to obtain the feedforward dimension (default=4).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
Examples::
>>> encoder_layer = TransformerLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward_multiplier=4,
layer_norm_eps=1e-05, device=None, dtype=None) ->None:
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0)
dim_feedforward = dim_feedforward_multiplier * d_model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.activation = nn.CELU()
def forward(self, src: 'torch.Tensor', src_mask:
'Optional[torch.Tensor]'=None, src_key_padding_mask:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(src.swapaxes(0, 1), src.swapaxes(0, 1), src.
swapaxes(0, 1), attn_mask=src_mask, key_padding_mask=
src_key_padding_mask)[0].swapaxes(0, 1)
src = src + src2
src = self.norm1(src)
src2 = self.linear2(self.activation(self.linear1(src)))
src = src + src2
src = self.norm2(src)
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
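# Example usage (hedged sketch): with d_model=4, nhead=4 the layer maps an
# unbatched (seq_len, d_model) input to the same shape; the swapaxes calls in
# forward() adapt batch-first inputs to nn.MultiheadAttention's seq-first API.
#   layer = TransformerLayer(d_model=4, nhead=4)
#   out = layer(torch.rand(4, 4))  # out.shape == (4, 4)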
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0), xmask)
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0), xmask)
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (12 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * y0), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0 + 4 * x1), xmask & ymask)
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x1 + 4 * y0), tmp13, xmask & ymask)
@triton.jit
def triton_poi_fused_celu_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = libdevice.expm1(tmp0)
tmp4 = tl.where(tmp2, tmp0, tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (16, 4), (4, 1))
assert_size_stride(primals_9, (16,), (1,))
assert_size_stride(primals_10, (4, 16), (16, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf2, (4, 1, 4), (4, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_poi_fused_add_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
triton_poi_fused_mul_1[grid(16)](buf4, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0)
del buf1
triton_poi_fused_add_2[grid(16)](buf5, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf4, reinterpret_tensor(buf5, (4, 1, 4), (1, 0,
4), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf6, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused__softmax_4[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf3, (4, 4, 1), (1, 4,
0), 0), out=buf9)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf9, buf10, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (4, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_5, reinterpret_tensor(buf10, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_5
buf12 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf11,
buf12, buf13, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(4, 4)](primals_1,
buf11, buf12, buf13, primals_6, primals_7, buf14, 4, 4, XBLOCK=
4, YBLOCK=4, num_warps=1, num_stages=1)
del primals_7
buf15 = reinterpret_tensor(buf7, (4, 16), (16, 1), 0)
del buf7
extern_kernels.addmm(primals_9, buf14, reinterpret_tensor(primals_8,
(4, 16), (1, 4), 0), alpha=1, beta=1, out=buf15)
del primals_9
buf16 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_celu_8[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf16, reinterpret_tensor(primals_10, (16, 4), (1,
16), 0), out=buf17)
buf18 = buf17
del buf17
triton_poi_fused_add_9[grid(16)](buf18, buf14, primals_11, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_11
buf19 = buf13
del buf13
buf20 = buf12
del buf12
triton_poi_fused_native_layer_norm_10[grid(4)](buf18, buf19, buf20,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_11[grid(16)](buf18, buf19, buf20,
primals_12, primals_13, buf21, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf19
del buf20
del primals_13
return (buf21, primals_1, primals_6, primals_12, buf8,
reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf11, buf14, buf15,
buf16, buf18, primals_10, primals_8, primals_4, reinterpret_tensor(
buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 1, 4),
(1, 1, 4), 0), reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0))
class TransformerLayerNew(nn.Module):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward_multiplier: multiplier applied to d_model to obtain the feedforward dimension (default=4).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
Examples::
>>> encoder_layer = TransformerLayerNew(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward_multiplier=4,
layer_norm_eps=1e-05, device=None, dtype=None) ->None:
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0)
dim_feedforward = dim_feedforward_multiplier * d_model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.activation = nn.CELU()
def forward(self, input_0):
# Map parameters to the primals slots that call() actually consumes:
# primals_1 is the input, primals_4/primals_5 the attention out-projection,
# primals_6/primals_7 norm1, and primals_11 the linear2 bias.
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
primals_4 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_8 = self.linear1.weight
primals_9 = self.linear1.bias
primals_10 = self.linear2.weight
primals_11 = self.linear2.bias
primals_6 = self.norm1.weight
primals_7 = self.norm1.bias
primals_12 = self.norm2.weight
primals_13 = self.norm2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| rgreenblatt/path | TransformerLayer | false | 7575 | ["MIT"] | 1 | 2057618ee3a6067c230c1c1c40856d2c9f5006b0 | https://github.com/rgreenblatt/path/tree/2057618ee3a6067c230c1c1c40856d2c9f5006b0 | import torch
from torch import nn
from typing import Optional
class Model(nn.Module):
"""TransformerEncoderLayer is made up of self-attn and feedforward network.
This standard encoder layer is based on the paper "Attention Is All You Need".
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in
Neural Information Processing Systems, pages 6000-6010. Users may modify or implement
in a different way during application.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward_multiplier: multiplier applied to d_model to obtain the feedforward dimension (default=4).
layer_norm_eps: the eps value in layer normalization components (default=1e-5).
Examples::
>>> encoder_layer = Model(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward_multiplier=4,
layer_norm_eps=1e-05, device=None, dtype=None) ->None:
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0)
dim_feedforward = dim_feedforward_multiplier * d_model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
self.activation = nn.CELU()
def forward(self, src: 'torch.Tensor', src_mask:
'Optional[torch.Tensor]'=None, src_key_padding_mask:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
see the docs in Transformer class.
"""
src2 = self.self_attn(src.swapaxes(0, 1), src.swapaxes(0, 1), src.
swapaxes(0, 1), attn_mask=src_mask, key_padding_mask=
src_key_padding_mask)[0].swapaxes(0, 1)
src = src + src2
src = self.norm1(src)
src2 = self.linear2(self.activation(self.linear1(src)))
src = src + src2
src = self.norm2(src)
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
AE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7g/c7gdibvkv66wahy4qteufqgghcjbacvogl535jigu4ill24lb4pn.py
# Topologically Sorted Source Nodes: [x, out1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out1 => gt, mul, where
# x => convolution
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
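# triton_poi_fused_convolution_leaky_relu_0 adds the convolution bias and
# applies LeakyReLU with negative_slope=0.2, also writing the x > 0 mask that
# the backward pass reuses. A hedged eager-mode sketch; the helper is
# illustrative only:
def _bias_leaky_relu_sketch(conv_out, bias, slope=0.2):
    x = conv_out + bias.view(1, -1, 1, 1)
    return x > 0, torch.where(x > 0, x, slope * x)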
# kernel path: runs/run_shard_4/inductor_cache/ag/cagavkog5o27eoekhbihlksvmikgcymf2wq5rb2url3hsqeryznv.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_offsets_to_indices, _low_memory_max_pool2d_with_offsets, getitem
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where, [2, 2], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %_low_memory_max_pool2d_offsets_to_indices : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_1, 2, 64, [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 33) % 33
x0 = xindex % 33
x2 = (xindex // 1089)
x4 = xindex % 1089
x5 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-65) + (2*x0) + (128*x1) + (4096*x2)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-64) + (2*x0) + (128*x1) + (4096*x2)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2*x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (128*x1) + (4096*x2)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + ((2*x0) + (128*x1) + (4096*x2)), tmp26 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 != 0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + (x4 + (1120*x2)), tmp28, xmask)
tl.store(out_ptr2 + (x5), tmp46, xmask)
''', device_str='cuda')
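# triton_poi_fused_max_pool2d_with_indices_1 does 2x2, stride-2, padding-1 max
# pooling on the 64x64 map (output 33x33) and converts each window's local
# argmax offset into a flat index into the 64x64 input, as
# MaxPool2d(return_indices=True) would. A hedged eager-mode sketch; the helper
# is illustrative only:
def _maxpool_reference_sketch(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=2, stride=2, padding=1, return_indices=True)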
# kernel path: runs/run_shard_4/inductor_cache/tq/ctq423m3higghu5rwkfx27kes7coot6nputhwykhm36gprgzoadl.py
# Topologically Sorted Source Nodes: [x_2, out2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out2 => gt_1, mul_1, where_1
# x_2 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1089) % 128
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
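
# Hedged eager-mode sketch of the conv + LeakyReLU fusion above: the kernel
# adds the per-channel conv bias, applies LeakyReLU with slope 0.2, and also
# stores the boolean `x > 0` mask (out_ptr0) that autograd reuses. The helper
# name is illustrative, not part of the generated module.
def _reference_bias_leaky_relu(conv_out, bias, negative_slope=0.2):
    x = conv_out + bias.view(1, -1, 1, 1)  # broadcast bias over N, H, W
    mask = x > 0                           # saved for the backward pass
    return mask, torch.where(mask, x, x * negative_slope)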

# kernel path: runs/run_shard_4/inductor_cache/7l/c7l2sp6zoaduxwlfi3vie6gac7mr6pr7kmcphj4m5yqchvg7bch5.py
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_offsets_to_indices_1, _low_memory_max_pool2d_with_offsets_1, getitem_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_1, [2, 2], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %_low_memory_max_pool2d_offsets_to_indices_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_3, 2, 33, [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 17) % 17
x0 = xindex % 17
x2 = (xindex // 289)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-34) + (2*x0) + (66*x1) + (1089*x2)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-33) + (2*x0) + (66*x1) + (1089*x2)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2*x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (66*x1) + (1089*x2)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + ((2*x0) + (66*x1) + (1089*x2)), tmp26 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 != 0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + (x4), tmp28, xmask)
tl.store(out_ptr2 + (x4), tmp46, xmask)
''', device_str='cuda')
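
# Output-size check for the pooling chain (kernel 2, stride 2, padding 1,
# ceil_mode=False): H_out = (H_in + 2*1 - 2) // 2 + 1, which gives the
# spatial sizes 64 -> 33 -> 17 -> 9 -> 5 across the four pooling stages
# handled by these kernels.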

# kernel path: runs/run_shard_4/inductor_cache/ld/cldyj33m7sflwes7s2y5s5gx5s4o6lb6uxhxygwb4z36mxzzja7o.py
# Topologically Sorted Source Nodes: [x_4, out3], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out3 => gt_2, mul_2, where_2
# x_4 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 289) % 256
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/wp/cwpw7dk62l5he4ut4o4mxis6ehd6a7vfphbozxk5xleqojv6xq6z.py
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_2 => _low_memory_max_pool2d_offsets_to_indices_2, _low_memory_max_pool2d_with_offsets_2, getitem_4
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_2, [2, 2], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %_low_memory_max_pool2d_offsets_to_indices_2 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_5, 2, 17, [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 9) % 9
x0 = xindex % 9
x2 = (xindex // 81)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-18) + (2*x0) + (34*x1) + (289*x2)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-17) + (2*x0) + (34*x1) + (289*x2)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2*x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (34*x1) + (289*x2)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + ((2*x0) + (34*x1) + (289*x2)), tmp26 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 != 0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + (x4), tmp28, xmask)
tl.store(out_ptr2 + (x4), tmp46, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/7w/c7ww3vdepjlrxzlzidiplohg5cvtk7dl4ru6ibp4slb6ivb2vjrp.py
# Topologically Sorted Source Nodes: [x_6, out4], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out4 => gt_3, mul_3, where_3
# x_6 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 81) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
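
# Note on masking: unlike the kernels above, this kernel fixes xmask to a
# constant all-True vector. Inductor appears to elide the bounds check when
# xnumel is divisible by every XBLOCK the autotuner may choose (here
# 165888 = 162 * 1024), so no lane can index past the end of the buffers.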

# kernel path: runs/run_shard_4/inductor_cache/ut/cut4hxrrkqpnihmxn5hei6ilacmd2o3qy5lji3fh3rcda22riew2.py
# Topologically Sorted Source Nodes: [max_pool2d_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d_3 => _low_memory_max_pool2d_offsets_to_indices_3, _low_memory_max_pool2d_with_offsets_3, getitem_6
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_3, [2, 2], [2, 2], [1, 1], [1, 1], False), kwargs = {})
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %_low_memory_max_pool2d_offsets_to_indices_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_offsets_to_indices.default](args = (%getitem_7, 2, 9, [2, 2], [1, 1]), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 5) % 5
x0 = xindex % 5
x2 = (xindex // 25)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-10) + (2*x0) + (18*x1) + (81*x2)), tmp10, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-9) + (2*x0) + (18*x1) + (81*x2)), tmp16, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2*x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + ((-1) + (2*x0) + (18*x1) + (81*x2)), tmp23, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + ((2*x0) + (18*x1) + (81*x2)), tmp26, eviction_policy='evict_last', other=float("-inf"))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 != 0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + (x4), tmp28, None)
tl.store(out_ptr2 + (x4), tmp46, None)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/av/caveetqikgzznwkgfbidjyrvrypscuz5sfhzv4y7gekezn7enlo3.py
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_8 => convolution_4
# x_9 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.2), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 102400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 25) % 1024
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/b7/cb75plcxmjn7steopvtgygttvot2pduchb3gr7skccikei657rrr.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_10 => convolution_5
# x_11 => gt_5, mul_5, where_5
# Graph fragment:
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_12, %primals_13, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_5, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_5, 0.2), kwargs = {})
# %where_5 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %convolution_5, %mul_5), kwargs = {})
triton_poi_fused_convolution_leaky_relu_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 25) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/l2/cl2b6gcsqmom3jjsibpx2h6kanl7tkmygx7wx3xw3extgrsazpd6.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%max_unpool2d, %where_3], 1), kwargs = {})
triton_poi_fused_cat_10 = async_compile.triton('triton_poi_fused_cat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 331776
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 81) % 1024
x0 = xindex % 81
x2 = (xindex // 82944)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (81*x1) + (41472*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 1024, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (81*((-512) + x1)) + (41472*x2)), tmp6, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
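
# Hedged sketch of the decoder step realized by this kernel together with the
# aten.max_unpool2d call issued from call(): pooled features are scattered
# back to the encoder resolution via the saved argmax indices, then
# concatenated with the matching skip activation along the channel dim
# (the x1 < 512 predicate picks which input each output channel reads).
# The helper name is illustrative only.
def _reference_unpool_and_cat(pooled, indices, skip):
    import torch.nn.functional as F
    up = F.max_unpool2d(pooled, indices, kernel_size=2, stride=2,
                        output_size=skip.shape[-2:])  # e.g. (4, 512, 9, 9)
    return torch.cat([up, skip], dim=1)               # e.g. (4, 1024, 9, 9)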

# kernel path: runs/run_shard_4/inductor_cache/ak/caktluqhxsdwmmze4u42tpkdxnxptxab4hck6um5dmflf7kdczps.py
# Topologically Sorted Source Nodes: [x_13, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_13 => convolution_6
# x_14 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.2), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_11 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 256
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/3l/c3ldpun2pjpr5izu4ji3yqakc65fcax6qejccfr4vhrdg32x2wdq.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%max_unpool2d_1, %where_2], 1), kwargs = {})
triton_poi_fused_cat_12 = async_compile.triton('triton_poi_fused_cat_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 289) % 512
x0 = xindex % 289
x2 = (xindex // 147968)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (289*x1) + (73984*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 512, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (289*((-256) + x1)) + (73984*x2)), tmp6, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/r5/cr54gf2noieft5ew2n4rdc36gxrljdof6umbsyk5g23pm66vfosh.py
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_16 => convolution_7
# x_17 => gt_7, mul_7, where_7
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_7 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_7, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_7, 0.2), kwargs = {})
# %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %convolution_7, %mul_7), kwargs = {})
triton_poi_fused_convolution_leaky_relu_13 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_13(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 289) % 128
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/nk/cnku3f55qyo2pmzirpmjpponqfowkicg6oyitqhul3qxdqlpplge.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%max_unpool2d_2, %where_1], 1), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 1089) % 256
x0 = xindex % 1089
x2 = (xindex // 278784)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1089*x1) + (139392*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 256, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (1089*((-128) + x1)) + (139392*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/sq/csq6tgbaerl4v3spanlpmaladv637ehnm5dqgqyspncpcxlj4trd.py
# Topologically Sorted Source Nodes: [x_19, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_19 => convolution_8
# x_20 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.2), kwargs = {})
# %where_8 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_15 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1089) % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/hx/chxv7q3z2ohnhirsw2xzjzriwuc5r5hmdwxvzgqa3qhx5ztu4olh.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%max_unpool2d_3, %where], 1), kwargs = {})
triton_poi_fused_cat_16 = async_compile.triton('triton_poi_fused_cat_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 128
x0 = xindex % 4096
x2 = (xindex // 524288)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (262144*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 128, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-64) + x1)) + (262144*x2)), tmp6, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')

# kernel path: runs/run_shard_4/inductor_cache/aj/cajtwj3wehhmespau3nwqid6gj3qpdvrtsqwu4rk2bcgxwjsvv7d.py
# Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.convolution, aten._softmax]
# Source node to ATen node mapping:
# x_22 => convolution_9
# x_23 => amax, div, exp, sub, sum_1
# Graph fragment:
# %convolution_9 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_9, [-3], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_9, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-3], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_convolution_17 = async_compile.triton('triton_poi_fused__softmax_convolution_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_convolution_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_convolution_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tl.store(in_out_ptr0 + (x0), tmp6, None)
''', device_str='cuda')
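
# The fused softmax above runs over dim=-3 (channels) of a tensor whose
# channel dim is 1 (the final conv, primals_20, has out_channels=1), so it
# degenerates to exp(x - x) / exp(x - x) == 1 everywhere -- which is exactly
# what tmp4 = tmp3 - tmp3 and tmp6 = tmp5 / tmp5 compute. Eager-mode sketch:
def _reference_single_channel_softmax(x):
    return torch.softmax(x, dim=-3)  # all ones whenever x.size(-3) == 1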

async_compile.wait(globals())
del async_compile
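
# Overview of call() (inferred from the graph fragments above): a
# SegNet-style encoder-decoder. The encoder alternates 3x3 conv +
# LeakyReLU(0.2) with 2x2/stride-2 max pooling that saves argmax indices
# (64 -> 128 -> 256 -> 512 -> 1024 channels); the decoder mirrors it with
# aten.max_unpool2d on those indices, channel concatenation with the matching
# encoder activation, and a conv at each scale, ending in a single-channel
# conv followed by softmax.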

def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (512, ), (1, ))
assert_size_stride(primals_10, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_11, (1024, ), (1, ))
assert_size_stride(primals_12, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_13, (512, ), (1, ))
assert_size_stride(primals_14, (256, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (1, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, out1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 1048576, grid=grid(1048576), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 64, 33, 33), (71680, 1120, 33, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf2, buf3, buf5, 278784, grid=grid(278784), stream=stream0)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 33, 33), (139392, 1089, 33, 1))
buf7 = empty_strided_cuda((4, 128, 33, 33), (139392, 1089, 33, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128, 33, 33), (139392, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, out2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf6, primals_5, buf7, buf8, 557568, grid=grid(557568), stream=stream0)
del buf6
del primals_5
buf9 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_pool2d_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf8, buf9, buf11, 147968, grid=grid(147968), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 17, 17), (73984, 289, 17, 1))
buf13 = empty_strided_cuda((4, 256, 17, 17), (73984, 289, 17, 1), torch.bool)
buf14 = empty_strided_cuda((4, 256, 17, 17), (73984, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4, out3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf12, primals_7, buf13, buf14, 295936, grid=grid(295936), stream=stream0)
del buf12
del primals_7
buf15 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
buf17 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_pool2d_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf14, buf15, buf17, 82944, grid=grid(82944), stream=stream0)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf15, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 512, 9, 9), (41472, 81, 9, 1))
buf19 = empty_strided_cuda((4, 512, 9, 9), (41472, 81, 9, 1), torch.bool)
buf20 = empty_strided_cuda((4, 512, 9, 9), (41472, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6, out4], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf18, primals_9, buf19, buf20, 165888, grid=grid(165888), stream=stream0)
del buf18
del primals_9
buf21 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
buf23 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.int64)
# Topologically Sorted Source Nodes: [max_pool2d_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf20, buf21, buf23, 51200, grid=grid(51200), stream=stream0)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf21, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 1024, 5, 5), (25600, 25, 5, 1))
buf25 = empty_strided_cuda((4, 1024, 5, 5), (25600, 25, 5, 1), torch.bool)
buf26 = empty_strided_cuda((4, 1024, 5, 5), (25600, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8, x_9], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf24, primals_11, buf25, buf26, 102400, grid=grid(102400), stream=stream0)
del buf24
del primals_11
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 512, 5, 5), (12800, 25, 5, 1))
buf28 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
buf29 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_9.run(buf27, primals_13, buf28, buf29, 51200, grid=grid(51200), stream=stream0)
del buf27
del primals_13
# Topologically Sorted Source Nodes: [x_10, x_11, x_12], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_unpool2d]
buf30 = torch.ops.aten.max_unpool2d.default(buf29, buf23, [9, 9])
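        # max_unpool2d scatters each pooled value back to the position recorded
        # in buf23 and zero-fills the rest of the 9x9 map; it is dispatched to
        # the ATen op rather than a generated Triton kernel.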
del buf29
buf31 = buf30
del buf30
buf32 = empty_strided_cuda((4, 1024, 9, 9), (82944, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_10.run(buf31, buf20, buf32, 331776, grid=grid(331776), stream=stream0)
del buf31
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.convolution]
buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 256, 9, 9), (20736, 81, 9, 1))
buf34 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
buf35 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_11.run(buf33, primals_15, buf34, buf35, 82944, grid=grid(82944), stream=stream0)
del buf33
del primals_15
# Topologically Sorted Source Nodes: [x_13, x_14, x_15], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_unpool2d]
buf36 = torch.ops.aten.max_unpool2d.default(buf35, buf17, [17, 17])
del buf35
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, 512, 17, 17), (147968, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_12.run(buf37, buf14, buf38, 591872, grid=grid(591872), stream=stream0)
del buf37
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.convolution]
buf39 = extern_kernels.convolution(buf38, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 17, 17), (36992, 289, 17, 1))
buf40 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool)
buf41 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_13.run(buf39, primals_17, buf40, buf41, 147968, grid=grid(147968), stream=stream0)
del buf39
del primals_17
# Topologically Sorted Source Nodes: [x_16, x_17, x_18], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_unpool2d]
buf42 = torch.ops.aten.max_unpool2d.default(buf41, buf11, [33, 33])
del buf41
buf43 = buf42
del buf42
buf44 = empty_strided_cuda((4, 256, 33, 33), (278784, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf43, buf8, buf44, 1115136, grid=grid(1115136), stream=stream0)
del buf43
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
buf45 = extern_kernels.convolution(buf44, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf46 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool)
buf47 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_15.run(buf45, primals_19, buf46, buf47, 278784, grid=grid(278784), stream=stream0)
del buf45
del primals_19
# Topologically Sorted Source Nodes: [x_19, x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu, aten.max_unpool2d]
buf48 = torch.ops.aten.max_unpool2d.default(buf47, buf5, [64, 64])
del buf47
buf49 = buf48
del buf48
buf50 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_16.run(buf49, buf2, buf50, 2097152, grid=grid(2097152), stream=stream0)
del buf49
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf52 = buf51; del buf51 # reuse
# Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.convolution, aten._softmax]
triton_poi_fused__softmax_convolution_17.run(buf52, primals_21, 16384, grid=grid(16384), stream=stream0)
del primals_21
return (buf52, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, buf1, buf2, buf3, buf5, buf7, buf8, buf9, buf11, buf13, buf14, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf26, buf28, buf32, buf34, buf38, buf40, buf44, buf46, buf50, buf52, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((64, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((1, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AE(nn.Module):
def __init__(self):
super(AE, self).__init__()
self.leaky_reLU = nn.LeakyReLU(0.2)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1,
return_indices=True)
self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2, padding=1)
self.softmax = nn.Softmax2d()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=
3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512,
kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=1024,
kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=1024, out_channels=512,
kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(in_channels=1024, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv8 = nn.Conv2d(in_channels=512, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv9 = nn.Conv2d(in_channels=256, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv10 = nn.Conv2d(in_channels=128, out_channels=1,
kernel_size=3, stride=1, padding=1)
def forward(self, x):
x = self.conv1(x)
out1 = self.leaky_reLU(x)
x = out1
size1 = x.size()
x, indices1 = self.pool(x)
x = self.conv2(x)
out2 = self.leaky_reLU(x)
x = out2
size2 = x.size()
x, indices2 = self.pool(x)
x = self.conv3(x)
out3 = self.leaky_reLU(x)
x = out3
size3 = x.size()
x, indices3 = self.pool(x)
x = self.conv4(x)
out4 = self.leaky_reLU(x)
x = out4
size4 = x.size()
x, indices4 = self.pool(x)
x = self.conv5(x)
x = self.leaky_reLU(x)
x = self.conv6(x)
x = self.leaky_reLU(x)
x = self.unpool(x, indices4, output_size=size4)
x = self.conv7(torch.cat((x, out4), 1))
x = self.leaky_reLU(x)
x = self.unpool(x, indices3, output_size=size3)
x = self.conv8(torch.cat((x, out3), 1))
x = self.leaky_reLU(x)
x = self.unpool(x, indices2, output_size=size2)
x = self.conv9(torch.cat((x, out2), 1))
x = self.leaky_reLU(x)
x = self.unpool(x, indices1, output_size=size1)
x = self.conv10(torch.cat((x, out1), 1))
x = self.softmax(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
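# Illustrative shape trace (editor's note, not part of the exported module):
# MaxPool2d(kernel_size=2, stride=2, padding=1) maps H -> H // 2 + 1, so the
# encoder runs 64 -> 33 -> 17 -> 9 -> 5 and the decoder unpools back to 64,
# matching the buffer shapes asserted in call() above. A minimal smoke test
# under those assumptions:
#     model = AE()
#     out = model(torch.rand(4, 3, 64, 64))
#     assert out.shape == (4, 1, 64, 64)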
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
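    # tmp2 = conv output + per-channel bias; the lines below apply LeakyReLU(0.2)
    # and keep the positive mask (tmp4) so the backward pass can reuse it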
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 33 % 33
x0 = xindex % 33
x2 = xindex // 1089
x4 = xindex % 1089
x5 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-65 + 2 * x0 + 128 * x1 + 4096 * x2), tmp10 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-64 + 2 * x0 + 128 * x1 + 4096 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 128 * x1 + 4096 * x2), tmp23 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 128 * x1 + 4096 * x2), tmp26 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 !=
0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
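    # (tmp43, tmp44) is the absolute (row, col) of the winning element; the flat
    # index saved for unpooling is row * input_width + col (tmp3 == 64 here)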
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + (x4 + 1120 * x2), tmp28, xmask)
tl.store(out_ptr2 + x5, tmp46, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1089 % 128
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 17 % 17
x0 = xindex % 17
x2 = xindex // 289
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 33, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-34 + 2 * x0 + 66 * x1 + 1089 * x2), tmp10 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-33 + 2 * x0 + 66 * x1 + 1089 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 66 * x1 + 1089 * x2), tmp23 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 66 * x1 + 1089 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 !=
0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + x4, tmp28, xmask)
tl.store(out_ptr2 + x4, tmp46, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 289 % 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 9 % 9
x0 = xindex % 9
x2 = xindex // 81
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 17, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-18 + 2 * x0 + 34 * x1 + 289 * x2), tmp10 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-17 + 2 * x0 + 34 * x1 + 289 * x2), tmp16 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 34 * x1 + 289 * x2), tmp23 &
xmask, eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 34 * x1 + 289 * x2), tmp26 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 !=
0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + x4, tmp28, xmask)
tl.store(out_ptr2 + x4, tmp46, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 81 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 5 % 5
x0 = xindex % 5
x2 = xindex // 25
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 9, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-10 + 2 * x0 + 18 * x1 + 81 * x2), tmp10,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-9 + 2 * x0 + 18 * x1 + 81 * x2), tmp16,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 * x1
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp22 & tmp9
tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 18 * x1 + 81 * x2), tmp23,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = tmp22 & tmp15
tmp27 = tl.load(in_ptr0 + (2 * x0 + 18 * x1 + 81 * x2), tmp26,
eviction_policy='evict_last', other=float('-inf'))
tmp28 = triton_helpers.maximum(tmp27, tmp25)
tmp29 = tmp17 > tmp11
tmp30 = tl.full([1], 1, tl.int8)
tmp31 = tl.full([1], 0, tl.int8)
tmp32 = tl.where(tmp29, tmp30, tmp31)
tmp33 = tmp24 > tmp18
tmp34 = tl.full([1], 2, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp27 > tmp25
tmp37 = tl.full([1], 3, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tl.full([1], 2, tl.int32)
tmp40 = tl.where((tmp38 < 0) != (tmp39 < 0), tl.where(tmp38 % tmp39 !=
0, tmp38 // tmp39 - 1, tmp38 // tmp39), tmp38 // tmp39)
tmp41 = tmp40 * tmp39
tmp42 = tmp38 - tmp41
tmp43 = tmp0 + tmp40
tmp44 = tmp6 + tmp42
tmp45 = tmp43 * tmp3
tmp46 = tmp45 + tmp44
tl.store(out_ptr0 + x4, tmp28, None)
tl.store(out_ptr2 + x4, tmp46, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 25 % 1024
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 25 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_cat_10(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 81 % 1024
x0 = xindex % 81
x2 = xindex // 82944
x3 = xindex
tmp0 = x1
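    # channel concat: channels [0, 512) read the unpooled tensor (in_ptr0),
    # channels [512, 1024) read the encoder skip connection (in_ptr1, offset by -512)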
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 81 * x1 + 41472 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 1024, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 81 * (-512 + x1) + 41472 * x2), tmp6,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 256
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_12(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 289 % 512
x0 = xindex % 289
x2 = xindex // 147968
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 289 * x1 + 73984 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 289 * (-256 + x1) + 73984 * x2), tmp6,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_13(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 289 % 128
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 1089 % 256
x0 = xindex % 1089
x2 = xindex // 278784
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1089 * x1 + 139392 * x2), tmp4 & xmask,
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 1089 * (-128 + x1) + 139392 * x2), tmp6 &
xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1089 % 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_cat_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 128
x0 = xindex % 4096
x2 = xindex // 524288
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 262144 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-64 + x1) + 262144 * x2), tmp6,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused__softmax_convolution_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
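    # Softmax2d over a single channel degenerates: x - max(x) == 0 and
    # exp(0) / sum(exp(0)) == 1, so tmp6 below is 1.0 everywhere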
tmp4 = tmp3 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp5 / tmp5
tl.store(in_out_ptr0 + x0, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_11, (1024,), (1,))
assert_size_stride(primals_12, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_13, (512,), (1,))
assert_size_stride(primals_14, (256, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (1, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(1048576)](buf0,
primals_2, buf1, buf2, 1048576, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((4, 64, 33, 33), (71680, 1120, 33, 1),
torch.float32)
buf5 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.int64)
triton_poi_fused_max_pool2d_with_indices_1[grid(278784)](buf2, buf3,
buf5, 278784, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 33, 33), (139392, 1089, 33, 1))
buf7 = empty_strided_cuda((4, 128, 33, 33), (139392, 1089, 33, 1),
torch.bool)
buf8 = empty_strided_cuda((4, 128, 33, 33), (139392, 1089, 33, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(557568)](buf6,
primals_5, buf7, buf8, 557568, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf6
del primals_5
buf9 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.int64)
triton_poi_fused_max_pool2d_with_indices_3[grid(147968)](buf8, buf9,
buf11, 147968, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 17, 17), (73984, 289, 17, 1))
buf13 = empty_strided_cuda((4, 256, 17, 17), (73984, 289, 17, 1),
torch.bool)
buf14 = empty_strided_cuda((4, 256, 17, 17), (73984, 289, 17, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(295936)](buf12,
primals_7, buf13, buf14, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf12
del primals_7
        buf15 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
        buf17 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.int64)
triton_poi_fused_max_pool2d_with_indices_5[grid(82944)](buf14,
buf15, buf17, 82944, XBLOCK=512, num_warps=8, num_stages=1)
buf18 = extern_kernels.convolution(buf15, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 512, 9, 9), (41472, 81, 9, 1))
        buf19 = empty_strided_cuda((4, 512, 9, 9), (41472, 81, 9, 1), torch.bool)
        buf20 = empty_strided_cuda((4, 512, 9, 9), (41472, 81, 9, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(165888)](buf18,
primals_9, buf19, buf20, 165888, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf18
del primals_9
        buf21 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
        buf23 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.int64)
triton_poi_fused_max_pool2d_with_indices_7[grid(51200)](buf20,
buf21, buf23, 51200, XBLOCK=512, num_warps=4, num_stages=1)
buf24 = extern_kernels.convolution(buf21, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 1024, 5, 5), (25600, 25, 5, 1))
buf25 = empty_strided_cuda((4, 1024, 5, 5), (25600, 25, 5, 1),
torch.bool)
buf26 = empty_strided_cuda((4, 1024, 5, 5), (25600, 25, 5, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_8[grid(102400)](buf24,
primals_11, buf25, buf26, 102400, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf24
del primals_11
buf27 = extern_kernels.convolution(buf26, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 512, 5, 5), (12800, 25, 5, 1))
        buf28 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
        buf29 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_9[grid(51200)](buf27,
primals_13, buf28, buf29, 51200, XBLOCK=512, num_warps=4,
num_stages=1)
del buf27
del primals_13
buf30 = torch.ops.aten.max_unpool2d.default(buf29, buf23, [9, 9])
del buf29
buf31 = buf30
del buf30
buf32 = empty_strided_cuda((4, 1024, 9, 9), (82944, 81, 9, 1),
torch.float32)
triton_poi_fused_cat_10[grid(331776)](buf31, buf20, buf32, 331776,
XBLOCK=1024, num_warps=4, num_stages=1)
del buf31
buf33 = extern_kernels.convolution(buf32, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf33, (4, 256, 9, 9), (20736, 81, 9, 1))
        buf34 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
        buf35 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_11[grid(82944)](buf33,
primals_15, buf34, buf35, 82944, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf33
del primals_15
buf36 = torch.ops.aten.max_unpool2d.default(buf35, buf17, [17, 17])
del buf35
buf37 = buf36
del buf36
buf38 = empty_strided_cuda((4, 512, 17, 17), (147968, 289, 17, 1),
torch.float32)
triton_poi_fused_cat_12[grid(591872)](buf37, buf14, buf38, 591872,
XBLOCK=512, num_warps=8, num_stages=1)
del buf37
buf39 = extern_kernels.convolution(buf38, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf39, (4, 128, 17, 17), (36992, 289, 17, 1))
buf40 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.bool)
buf41 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_13[grid(147968)](buf39,
primals_17, buf40, buf41, 147968, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf39
del primals_17
buf42 = torch.ops.aten.max_unpool2d.default(buf41, buf11, [33, 33])
del buf41
buf43 = buf42
del buf42
buf44 = empty_strided_cuda((4, 256, 33, 33), (278784, 1089, 33, 1),
torch.float32)
triton_poi_fused_cat_14[grid(1115136)](buf43, buf8, buf44, 1115136,
XBLOCK=1024, num_warps=4, num_stages=1)
del buf43
buf45 = extern_kernels.convolution(buf44, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf45, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf46 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.bool)
buf47 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_15[grid(278784)](buf45,
primals_19, buf46, buf47, 278784, XBLOCK=512, num_warps=8,
num_stages=1)
del buf45
del primals_19
buf48 = torch.ops.aten.max_unpool2d.default(buf47, buf5, [64, 64])
del buf47
buf49 = buf48
del buf48
buf50 = empty_strided_cuda((4, 128, 64, 64), (524288, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_16[grid(2097152)](buf49, buf2, buf50, 2097152,
XBLOCK=512, num_warps=8, num_stages=1)
del buf49
buf51 = extern_kernels.convolution(buf50, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf52 = buf51
del buf51
triton_poi_fused__softmax_convolution_17[grid(16384)](buf52,
primals_21, 16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_21
return (buf52, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, buf1, buf2, buf3, buf5, buf7, buf8, buf9, buf11, buf13,
buf14, buf15, buf17, buf19, buf20, buf21, buf23, buf25, buf26,
buf28, buf32, buf34, buf38, buf40, buf44, buf46, buf50, buf52)
class AENew(nn.Module):
def __init__(self):
super(AENew, self).__init__()
self.leaky_reLU = nn.LeakyReLU(0.2)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1,
return_indices=True)
self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2, padding=1)
self.softmax = nn.Softmax2d()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=
3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512,
kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=1024,
kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=1024, out_channels=512,
kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(in_channels=1024, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv8 = nn.Conv2d(in_channels=512, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv9 = nn.Conv2d(in_channels=256, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv10 = nn.Conv2d(in_channels=128, out_channels=1,
kernel_size=3, stride=1, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_12 = self.conv6.weight
primals_13 = self.conv6.bias
primals_14 = self.conv7.weight
primals_15 = self.conv7.bias
primals_16 = self.conv8.weight
primals_17 = self.conv8.bias
primals_18 = self.conv9.weight
primals_19 = self.conv9.bias
primals_20 = self.conv10.weight
primals_21 = self.conv10.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21])
return output[0]
| personwhofloat/Line-Segmentation-Model | AE | false | 7,576 | ["MIT"] | 1 | f00b65c7914f44fa31e14d41120903d0da2d5496 | https://github.com/personwhofloat/Line-Segmentation-Model/tree/f00b65c7914f44fa31e14d41120903d0da2d5496 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.leaky_reLU = nn.LeakyReLU(0.2)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1,
return_indices=True)
self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2, padding=1)
self.softmax = nn.Softmax2d()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=
3, stride=1, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512,
kernel_size=3, stride=1, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=1024,
kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(in_channels=1024, out_channels=512,
kernel_size=3, stride=1, padding=1)
self.conv7 = nn.Conv2d(in_channels=1024, out_channels=256,
kernel_size=3, stride=1, padding=1)
self.conv8 = nn.Conv2d(in_channels=512, out_channels=128,
kernel_size=3, stride=1, padding=1)
self.conv9 = nn.Conv2d(in_channels=256, out_channels=64,
kernel_size=3, stride=1, padding=1)
self.conv10 = nn.Conv2d(in_channels=128, out_channels=1,
kernel_size=3, stride=1, padding=1)
def forward(self, x):
x = self.conv1(x)
out1 = self.leaky_reLU(x)
x = out1
size1 = x.size()
x, indices1 = self.pool(x)
x = self.conv2(x)
out2 = self.leaky_reLU(x)
x = out2
size2 = x.size()
x, indices2 = self.pool(x)
x = self.conv3(x)
out3 = self.leaky_reLU(x)
x = out3
size3 = x.size()
x, indices3 = self.pool(x)
x = self.conv4(x)
out4 = self.leaky_reLU(x)
x = out4
size4 = x.size()
x, indices4 = self.pool(x)
x = self.conv5(x)
x = self.leaky_reLU(x)
x = self.conv6(x)
x = self.leaky_reLU(x)
x = self.unpool(x, indices4, output_size=size4)
x = self.conv7(torch.cat((x, out4), 1))
x = self.leaky_reLU(x)
x = self.unpool(x, indices3, output_size=size3)
x = self.conv8(torch.cat((x, out3), 1))
x = self.leaky_reLU(x)
x = self.unpool(x, indices2, output_size=size2)
x = self.conv9(torch.cat((x, out2), 1))
x = self.leaky_reLU(x)
x = self.unpool(x, indices1, output_size=size1)
x = self.conv10(torch.cat((x, out1), 1))
x = self.softmax(x)
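        # NOTE: Softmax2d normalizes across channels and conv10 emits a single
        # channel, so this softmax returns a constant all-ones map (the compiled
        # kernel computes exp(x - x) / exp(x - x)).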
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
GeM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hd/chdukesefbws4tpse2o7oyb4iohucll3brvsjhi7zvzctr5x6zs4.py
# Topologically Sorted Source Nodes: [clamp, pow_1, avg_pool2d, pow_2], Original ATen: [aten.clamp, aten.pow, aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# clamp => clamp_min
# pow_1 => pow_1
# pow_2 => pow_2
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 3), kwargs = {})
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%pow_1, [4, 4]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%avg_pool2d, 0.3333333333333333), kwargs = {})
triton_poi_fused_avg_pool2d_clamp_pow_0 = async_compile.triton('triton_poi_fused_avg_pool2d_clamp_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_clamp_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_clamp_pow_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp60 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp65 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp70 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp75 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp2
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = tmp6 * tmp6
tmp8 = tmp7 * tmp6
tmp9 = tmp8 + tmp4
tmp11 = triton_helpers.maximum(tmp10, tmp1)
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp11
tmp14 = tmp13 + tmp9
tmp16 = triton_helpers.maximum(tmp15, tmp1)
tmp17 = tmp16 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 + tmp14
tmp21 = triton_helpers.maximum(tmp20, tmp1)
tmp22 = tmp21 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 + tmp19
tmp26 = triton_helpers.maximum(tmp25, tmp1)
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp26
tmp29 = tmp28 + tmp24
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp31 * tmp31
tmp33 = tmp32 * tmp31
tmp34 = tmp33 + tmp29
tmp36 = triton_helpers.maximum(tmp35, tmp1)
tmp37 = tmp36 * tmp36
tmp38 = tmp37 * tmp36
tmp39 = tmp38 + tmp34
tmp41 = triton_helpers.maximum(tmp40, tmp1)
tmp42 = tmp41 * tmp41
tmp43 = tmp42 * tmp41
tmp44 = tmp43 + tmp39
tmp46 = triton_helpers.maximum(tmp45, tmp1)
tmp47 = tmp46 * tmp46
tmp48 = tmp47 * tmp46
tmp49 = tmp48 + tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp1)
tmp52 = tmp51 * tmp51
tmp53 = tmp52 * tmp51
tmp54 = tmp53 + tmp49
tmp56 = triton_helpers.maximum(tmp55, tmp1)
tmp57 = tmp56 * tmp56
tmp58 = tmp57 * tmp56
tmp59 = tmp58 + tmp54
tmp61 = triton_helpers.maximum(tmp60, tmp1)
tmp62 = tmp61 * tmp61
tmp63 = tmp62 * tmp61
tmp64 = tmp63 + tmp59
tmp66 = triton_helpers.maximum(tmp65, tmp1)
tmp67 = tmp66 * tmp66
tmp68 = tmp67 * tmp66
tmp69 = tmp68 + tmp64
tmp71 = triton_helpers.maximum(tmp70, tmp1)
tmp72 = tmp71 * tmp71
tmp73 = tmp72 * tmp71
tmp74 = tmp73 + tmp69
tmp76 = triton_helpers.maximum(tmp75, tmp1)
tmp77 = tmp76 * tmp76
tmp78 = tmp77 * tmp76
tmp79 = tmp78 + tmp74
tmp80 = 0.0625
tmp81 = tmp79 * tmp80
tmp82 = 0.3333333333333333
tmp83 = libdevice.pow(tmp81, tmp82)
tl.store(in_out_ptr0 + (x0), tmp83, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [clamp, pow_1, avg_pool2d, pow_2], Original ATen: [aten.clamp, aten.pow, aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_clamp_pow_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
def gem(x, p=3, eps=1e-06):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(
1.0 / p)
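# GeM computes ((1 / |Omega|) * sum_i x_i**p) ** (1 / p) over the whole spatial
# map; the eps clamp keeps the base positive so fractional powers stay defined.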
class GeM(torch.nn.Module):
"""
Implementation of GeM pooling.
https://paperswithcode.com/method/generalized-mean-pooling
NOTE:
p is learnable, but there is a consensus that it is better to fix the p value at 3.
"""
def __init__(self, p=3, eps=1e-06):
super(GeM, self).__init__()
self.p = p
self.eps = eps
def forward(self, x):
return gem(x, p=self.p, eps=self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
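# Quick sanity sketch (editor's illustration, assuming the definitions above):
#     x = torch.rand(4, 4, 4, 4)
#     assert GeM()(x).shape == (4, 4, 1, 1)  # one pooled value per channel
#     # with p = 1, GeM reduces to plain average pooling of the clamped input:
#     assert torch.allclose(gem(x, p=1), F.avg_pool2d(x.clamp(min=1e-06), (4, 4)))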
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_clamp_pow_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp40 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp60 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp65 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp70 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp75 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp2
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = tmp6 * tmp6
tmp8 = tmp7 * tmp6
tmp9 = tmp8 + tmp4
tmp11 = triton_helpers.maximum(tmp10, tmp1)
tmp12 = tmp11 * tmp11
tmp13 = tmp12 * tmp11
tmp14 = tmp13 + tmp9
tmp16 = triton_helpers.maximum(tmp15, tmp1)
tmp17 = tmp16 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 + tmp14
tmp21 = triton_helpers.maximum(tmp20, tmp1)
tmp22 = tmp21 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 + tmp19
tmp26 = triton_helpers.maximum(tmp25, tmp1)
tmp27 = tmp26 * tmp26
tmp28 = tmp27 * tmp26
tmp29 = tmp28 + tmp24
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp31 * tmp31
tmp33 = tmp32 * tmp31
tmp34 = tmp33 + tmp29
tmp36 = triton_helpers.maximum(tmp35, tmp1)
tmp37 = tmp36 * tmp36
tmp38 = tmp37 * tmp36
tmp39 = tmp38 + tmp34
tmp41 = triton_helpers.maximum(tmp40, tmp1)
tmp42 = tmp41 * tmp41
tmp43 = tmp42 * tmp41
tmp44 = tmp43 + tmp39
tmp46 = triton_helpers.maximum(tmp45, tmp1)
tmp47 = tmp46 * tmp46
tmp48 = tmp47 * tmp46
tmp49 = tmp48 + tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp1)
tmp52 = tmp51 * tmp51
tmp53 = tmp52 * tmp51
tmp54 = tmp53 + tmp49
tmp56 = triton_helpers.maximum(tmp55, tmp1)
tmp57 = tmp56 * tmp56
tmp58 = tmp57 * tmp56
tmp59 = tmp58 + tmp54
tmp61 = triton_helpers.maximum(tmp60, tmp1)
tmp62 = tmp61 * tmp61
tmp63 = tmp62 * tmp61
tmp64 = tmp63 + tmp59
tmp66 = triton_helpers.maximum(tmp65, tmp1)
tmp67 = tmp66 * tmp66
tmp68 = tmp67 * tmp66
tmp69 = tmp68 + tmp64
tmp71 = triton_helpers.maximum(tmp70, tmp1)
tmp72 = tmp71 * tmp71
tmp73 = tmp72 * tmp71
tmp74 = tmp73 + tmp69
tmp76 = triton_helpers.maximum(tmp75, tmp1)
tmp77 = tmp76 * tmp76
tmp78 = tmp77 * tmp76
tmp79 = tmp78 + tmp74
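    # all 16 window elements accumulated: mean = sum * 1/16 (tmp80), then the
    # 1/p root with p = 3 (tmp82)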
tmp80 = 0.0625
tmp81 = tmp79 * tmp80
tmp82 = 0.3333333333333333
tmp83 = libdevice.pow(tmp81, tmp82)
tl.store(in_out_ptr0 + x0, tmp83, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
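        # buf1 aliases buf0 with plain contiguous strides; the kernel then fills it in place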
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_avg_pool2d_clamp_pow_0[grid(16)](buf1, arg0_1, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf1,
def gem(x, p=3, eps=1e-06):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(
1.0 / p)
class GeMNew(torch.nn.Module):
"""
Implementation of GeM pooling.
https://paperswithcode.com/method/generalized-mean-pooling
NOTE:
p is learnable, but there is a consensus that it is better to fix the p value at 3.
"""
def __init__(self, p=3, eps=1e-06):
super(GeMNew, self).__init__()
self.p = p
self.eps = eps
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| rskmoi/landmark-retrieval-2020-with-pytorch | GeM | false | 7,577 | ["MIT"] | 1 | 41917b1f588b5ad396cb1095867a0f042c611675 | https://github.com/rskmoi/landmark-retrieval-2020-with-pytorch/tree/41917b1f588b5ad396cb1095867a0f042c611675 | import torch
import torch.nn.functional as F
def gem(x, p=3, eps=1e-06):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1))).pow(
1.0 / p)
class Model(torch.nn.Module):
"""
Implementation of GeM pooling.
https://paperswithcode.com/method/generalized-mean-pooling
NOTE:
p is learnable, but there is a consensus that it is better to fix the p value at 3.
"""
def __init__(self, p=3, eps=1e-06):
super().__init__()
self.p = p
self.eps = eps
def forward(self, x):
return gem(x, p=self.p, eps=self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
L2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/az/cazayodov2pejo62rnac6rludmval2wsivw6phwx6vee62yxa77i.py
# Topologically Sorted Source Nodes: [pow_1, sum_1], Original ATen: [aten.pow, aten.sum]
# Source node to ATen node mapping:
# pow_1 => pow_1
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
triton_red_fused_pow_sum_0 = async_compile.triton('triton_red_fused_pow_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = (xindex // 16)
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (2048*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
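# Reading aid (editorial note): this first stage emits four partial sums of
# squares, one per contiguous 128-channel chunk of the 512 channels (hence the
# 2048 = 16 * 128 stride on x1); the next kernel reduces the four partials,
# clamps, and applies rsqrt. A hedged eager restatement of both stages:
#     partial = x.pow(2).view(4, 4, 128, 4, 4).sum(dim=2)
#     inv_norm = partial.sum(dim=1, keepdim=True).clamp(min=1e-12).rsqrt()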
# kernel path: runs/run_shard_4/inductor_cache/yb/cybccy6ik3u5wdvejbe7sswchqjx66t2tiskszpk5e3v4xddkzk3.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, clamp, rsqrt], Original ATen: [aten.pow, aten.sum, aten.clamp, aten.rsqrt]
# Source node to ATen node mapping:
# clamp => clamp_min
# pow_1 => pow_1
# rsqrt => rsqrt
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sum_1, 1e-12), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%clamp_min,), kwargs = {})
triton_per_fused_clamp_pow_rsqrt_sum_1 = async_compile.triton('triton_per_fused_clamp_pow_rsqrt_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.OUTER_TINY,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_pow_rsqrt_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*r2) + (64*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-12
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = libdevice.rsqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp7, xmask)
''', device_str='cuda')
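# Reading aid (editorial note): clamping the summed squares at 1e-12 before
# rsqrt keeps the normalization finite for all-zero feature vectors, matching
# clamp(min=1e-12).rsqrt() in the eager forward.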
# kernel path: runs/run_shard_4/inductor_cache/go/cgo6toghmmfwug7mqg2i6moqp5yn5zkdrlyxi73gxriq4acxdhks.py
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %rsqrt), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 512
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 8192)
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp3 = tl.load(in_ptr2 + (x0 + (16*x2)), None, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
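# Reading aid (editorial note): in_ptr0 is the (1, 512, 1, 1) scale, indexed by
# channel x1 alone; in_ptr2 is the (4, 1, 4, 4) inverse norm, indexed by batch
# x2 and spatial offset x0 alone -- so a single pass fuses both broadcast
# multiplies of scale * data * inv_norm.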
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1], Original ATen: [aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_red_fused_pow_sum_0.run(primals_2, buf0, 256, 128, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [pow_1, sum_1, clamp, rsqrt], Original ATen: [aten.pow, aten.sum, aten.clamp, aten.rsqrt]
triton_per_fused_clamp_pow_rsqrt_sum_1.run(buf2, buf0, 64, 4, grid=grid(64), stream=stream0)
del buf0
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(primals_1, primals_2, buf2, buf3, 32768, grid=grid(32768), stream=stream0)
del primals_1
return (buf3, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class L2Norm(nn.Module):
"""
    The scale is learnable, following the original paper.
    scale: initial scale value
    chan_num: number of channels to normalize over (the L2 norm is taken across all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2Norm, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, data):
return self.scale * data * data.pow(2).sum(dim=1, keepdim=True).clamp(
min=1e-12).rsqrt()
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return [[], {}]
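# Editorial sketch, not from the source repo: the forward above is
# mathematically equivalent to F.normalize with the norm floored at 1e-6,
# because rsqrt(clamp(||v||^2, 1e-12)) == 1 / max(||v||, 1e-6). The helper
# name below is illustrative.
def _l2norm_normalize_equiv():
    import torch.nn.functional as F
    x = torch.rand(4, 512, 4, 4)
    m = L2Norm()
    ref = m.scale * F.normalize(x, p=2, dim=1, eps=1e-06)
    torch.testing.assert_close(m(x), ref)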
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_red_fused_pow_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 16
x1 = xindex // 16
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 2048 * x1), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_per_fused_clamp_pow_rsqrt_sum_1(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 16
x1 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * r2 + 64 * x1), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1e-12
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = libdevice.rsqrt(tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 512
x3 = xindex
x0 = xindex % 16
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, None)
tmp3 = tl.load(in_ptr2 + (x0 + 16 * x2), None, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (4, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 256, 4, 1, 16),
torch.float32)
get_raw_stream(0)
triton_red_fused_pow_sum_0[grid(256)](primals_2, buf0, 256, 128,
XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 4, 4), (16, 64, 4, 1), torch.float32)
buf2 = reinterpret_tensor(buf1, (4, 1, 4, 4), (16, 16, 4, 1), 0)
del buf1
triton_per_fused_clamp_pow_rsqrt_sum_1[grid(64)](buf2, buf0, 64, 4,
XBLOCK=64, num_warps=2, num_stages=1)
del buf0
buf3 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_mul_2[grid(32768)](primals_1, primals_2, buf2,
buf3, 32768, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
return buf3, primals_2, buf2
class L2NormNew(nn.Module):
"""
    The scale is learnable, following the original paper.
    scale: initial scale value
    chan_num: number of channels to normalize over (the L2 norm is taken across all channels)
"""
def __init__(self, scale=20, chan_num=512):
super(L2NormNew, self).__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, input_0):
primals_1 = self.scale
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| rotorliu/DALI | L2Norm | false | 7,578 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 4ea3529fc9b35cbdf09b260ec95197cfd52c0395 | https://github.com/rotorliu/DALI/tree/4ea3529fc9b35cbdf09b260ec95197cfd52c0395 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
"""
    The scale is learnable, following the original paper.
    scale: initial scale value
    chan_num: number of channels to normalize over (the L2 norm is taken across all channels)
"""
def __init__(self, scale=20, chan_num=512):
super().__init__()
self.scale = nn.Parameter(torch.Tensor([scale] * chan_num).view(1,
chan_num, 1, 1))
def forward(self, data):
return self.scale * data * data.pow(2).sum(dim=1, keepdim=True).clamp(
min=1e-12).rsqrt()
def get_inputs():
return [torch.rand([4, 512, 4, 4])]
def get_init_inputs():
return []
|
SRCNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7a/c7a2sqxnc6bi7sq5fihvseqxlvh33ljnmvvaziqhjhuxequqirct.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.floor, aten.clamp, aten.rsub, aten._unsafe_index]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, _unsafe_index_10, _unsafe_index_11, _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, _unsafe_index_2, _unsafe_index_3, _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, _unsafe_index_8, _unsafe_index_9, add, add_10, add_11, add_12, add_13, add_14, add_15, add_16, add_17, add_18, add_19, add_20, add_21, add_22, add_23, add_24, add_25, add_26, add_27, add_28, add_29, add_30, add_6, add_7, add_8, add_9, clamp_max, clamp_max_1, clamp_min, clamp_min_1, convert_element_type, floor, floor_1, iota, mul, mul_10, mul_11, mul_12, mul_13, mul_14, mul_15, mul_16, mul_17, mul_18, mul_19, mul_2, mul_20, mul_21, mul_22, mul_23, mul_24, mul_25, mul_26, mul_27, mul_28, mul_29, mul_3, mul_30, mul_31, mul_32, mul_33, mul_34, mul_35, mul_36, mul_37, mul_38, mul_39, mul_4, mul_40, mul_41, mul_42, mul_43, mul_44, mul_45, mul_5, mul_6, mul_7, mul_8, mul_9, sub, sub_10, sub_11, sub_12, sub_13, sub_14, sub_15, sub_16, sub_17, sub_18, sub_19, sub_2, sub_20, sub_21, sub_3, sub_6, sub_7, sub_8, sub_9
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.25), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 0.5), kwargs = {})
# %floor : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%sub,), kwargs = {})
# %floor_1 : [num_users=2] = call_function[target=torch.ops.aten.floor.default](args = (%unsqueeze,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %floor_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1.0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %floor), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_1 : [num_users=6] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max_1, 1.0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, -0.75), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, -3.75), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %add_6), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, -6.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, %add_6), kwargs = {})
# %sub_7 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_4, -3.0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max_1, 1.25), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_5, 2.25), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_8, %clamp_max_1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %clamp_max_1), kwargs = {})
# %add_8 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, 1), kwargs = {})
# %sub_9 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max_1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, 1.25), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_8, 2.25), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %sub_9), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_9, %sub_9), kwargs = {})
# %add_9 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_10, 1), kwargs = {})
# %sub_11 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max_1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, -0.75), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_11, -3.75), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %sub_11), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_12, -6.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_10, %sub_11), kwargs = {})
# %sub_13 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_13, -3.0), kwargs = {})
# %add_11 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%clamp_max, 1.0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_11, -0.75), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_14, -3.75), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %add_11), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, -6.0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_12, %add_11), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_16, -3.0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%clamp_max, 1.25), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_17, 2.25), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %clamp_max), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, 1), kwargs = {})
# %sub_17 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %clamp_max), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, 1.25), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_20, 2.25), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %sub_17), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_21, %sub_17), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, 1), kwargs = {})
# %sub_19 : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (2.0, %clamp_max), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, -0.75), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_23, -3.75), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_19), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_24, -6.0), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_15, %sub_19), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_25, -3.0), kwargs = {})
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_5]), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_7]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_2, %clamp_max_9]), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index, %sub_7), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_1, %add_8), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, %mul_27), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_2, %add_9), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_16, %mul_28), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_3, %sub_13), kwargs = {})
# %add_18 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %mul_29), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_3]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_10, %clamp_max_9]), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_4, %sub_7), kwargs = {})
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_5, %add_8), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_30, %mul_31), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_6, %add_9), kwargs = {})
# %add_20 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_19, %mul_32), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_7, %sub_13), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %mul_33), kwargs = {})
# %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_3]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_5]), kwargs = {})
# %_unsafe_index_10 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_7]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_18, %clamp_max_9]), kwargs = {})
# %mul_34 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_8, %sub_7), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_9, %add_8), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_34, %mul_35), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_10, %add_9), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_22, %mul_36), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_11, %sub_13), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_23, %mul_37), kwargs = {})
# %_unsafe_index_12 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_3]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_5]), kwargs = {})
# %_unsafe_index_14 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_7]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %clamp_max_26, %clamp_max_9]), kwargs = {})
# %mul_38 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_12, %sub_7), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_13, %add_8), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_38, %mul_39), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_14, %add_9), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_40), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%_unsafe_index_15, %sub_13), kwargs = {})
# %add_27 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_26, %mul_41), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_18, %sub_15), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, %add_13), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_42, %mul_43), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_24, %add_14), kwargs = {})
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_28, %mul_44), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_27, %sub_21), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_29, %mul_45), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(in_out_ptr1, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = libdevice.floor(tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 - tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tl.full([1], 3, tl.int64)
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tmp15 = x0
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 + tmp2
tmp18 = tmp17 * tmp4
tmp19 = tmp18 - tmp2
tmp20 = libdevice.floor(tmp19)
tmp21 = tmp20.to(tl.int32)
tmp22 = tmp21 - tmp9
tmp23 = triton_helpers.maximum(tmp22, tmp11)
tmp24 = triton_helpers.minimum(tmp23, tmp13)
tmp25 = tl.load(in_ptr0 + (tmp24 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp26 = tmp19 - tmp20
tmp27 = 0.0
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = 1.0
tmp30 = triton_helpers.minimum(tmp28, tmp29)
tmp31 = tmp30 + tmp29
tmp32 = -0.75
tmp33 = tmp31 * tmp32
tmp34 = -3.75
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp31
tmp37 = -6.0
tmp38 = tmp36 + tmp37
tmp39 = tmp38 * tmp31
tmp40 = -3.0
tmp41 = tmp39 - tmp40
tmp42 = tmp25 * tmp41
tmp43 = triton_helpers.maximum(tmp21, tmp11)
tmp44 = triton_helpers.minimum(tmp43, tmp13)
tmp45 = tl.load(in_ptr0 + (tmp44 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp46 = 1.25
tmp47 = tmp30 * tmp46
tmp48 = 2.25
tmp49 = tmp47 - tmp48
tmp50 = tmp49 * tmp30
tmp51 = tmp50 * tmp30
tmp52 = tmp51 + tmp29
tmp53 = tmp45 * tmp52
tmp54 = tmp21 + tmp9
tmp55 = triton_helpers.maximum(tmp54, tmp11)
tmp56 = triton_helpers.minimum(tmp55, tmp13)
tmp57 = tl.load(in_ptr0 + (tmp56 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp58 = tmp29 - tmp30
tmp59 = tmp58 * tmp46
tmp60 = tmp59 - tmp48
tmp61 = tmp60 * tmp58
tmp62 = tmp61 * tmp58
tmp63 = tmp62 + tmp29
tmp64 = tmp57 * tmp63
tmp65 = triton_helpers.maximum(tmp8, tmp11)
tmp66 = triton_helpers.minimum(tmp65, tmp13)
tmp67 = tl.load(in_ptr0 + (tmp24 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp68 = tmp67 * tmp41
tmp69 = tl.full([1], 2, tl.int64)
tmp70 = tmp21 + tmp69
tmp71 = triton_helpers.maximum(tmp70, tmp11)
tmp72 = triton_helpers.minimum(tmp71, tmp13)
tmp73 = tl.load(in_ptr0 + (tmp72 + (4*tmp14) + (16*x2)), xmask, eviction_policy='evict_last')
tmp74 = 2.0
tmp75 = tmp74 - tmp30
tmp76 = tmp75 * tmp32
tmp77 = tmp76 - tmp34
tmp78 = tmp77 * tmp75
tmp79 = tmp78 + tmp37
tmp80 = tmp79 * tmp75
tmp81 = tmp80 - tmp40
tmp82 = tmp73 * tmp81
tmp83 = tl.load(in_ptr0 + (tmp44 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp84 = tmp83 * tmp52
tmp85 = tl.load(in_ptr0 + (tmp56 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp86 = tmp85 * tmp63
tmp87 = tmp8 + tmp9
tmp88 = triton_helpers.maximum(tmp87, tmp11)
tmp89 = triton_helpers.minimum(tmp88, tmp13)
tmp90 = tl.load(in_ptr0 + (tmp24 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp91 = tmp90 * tmp41
tmp92 = tl.load(in_ptr0 + (tmp72 + (4*tmp66) + (16*x2)), xmask, eviction_policy='evict_last')
tmp93 = tmp92 * tmp81
tmp94 = tl.load(in_ptr0 + (tmp44 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp95 = tmp94 * tmp52
tmp96 = tl.load(in_ptr0 + (tmp56 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp97 = tmp96 * tmp63
tmp98 = tmp8 + tmp69
tmp99 = triton_helpers.maximum(tmp98, tmp11)
tmp100 = triton_helpers.minimum(tmp99, tmp13)
tmp101 = tl.load(in_ptr0 + (tmp24 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp102 = tmp101 * tmp41
tmp103 = tl.load(in_ptr0 + (tmp72 + (4*tmp89) + (16*x2)), xmask, eviction_policy='evict_last')
tmp104 = tmp103 * tmp81
tmp105 = tl.load(in_ptr0 + (tmp44 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp106 = tmp105 * tmp52
tmp107 = tl.load(in_ptr0 + (tmp56 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp108 = tmp107 * tmp63
tmp109 = tl.load(in_ptr0 + (tmp72 + (4*tmp100) + (16*x2)), xmask, eviction_policy='evict_last')
tmp110 = tmp109 * tmp81
tmp111 = tmp42 + tmp53
tmp112 = tmp111 + tmp64
tmp113 = tmp112 + tmp82
tmp114 = tmp6 - tmp7
tmp115 = triton_helpers.maximum(tmp114, tmp27)
tmp116 = triton_helpers.minimum(tmp115, tmp29)
tmp117 = tmp116 + tmp29
tmp118 = tmp117 * tmp32
tmp119 = tmp118 - tmp34
tmp120 = tmp119 * tmp117
tmp121 = tmp120 + tmp37
tmp122 = tmp121 * tmp117
tmp123 = tmp122 - tmp40
tmp124 = tmp113 * tmp123
tmp125 = tmp68 + tmp84
tmp126 = tmp125 + tmp86
tmp127 = tmp126 + tmp93
tmp128 = tmp116 * tmp46
tmp129 = tmp128 - tmp48
tmp130 = tmp129 * tmp116
tmp131 = tmp130 * tmp116
tmp132 = tmp131 + tmp29
tmp133 = tmp127 * tmp132
tmp134 = tmp124 + tmp133
tmp135 = tmp91 + tmp95
tmp136 = tmp135 + tmp97
tmp137 = tmp136 + tmp104
tmp138 = tmp29 - tmp116
tmp139 = tmp138 * tmp46
tmp140 = tmp139 - tmp48
tmp141 = tmp140 * tmp138
tmp142 = tmp141 * tmp138
tmp143 = tmp142 + tmp29
tmp144 = tmp137 * tmp143
tmp145 = tmp134 + tmp144
tmp146 = tmp102 + tmp106
tmp147 = tmp146 + tmp108
tmp148 = tmp147 + tmp110
tmp149 = tmp74 - tmp116
tmp150 = tmp149 * tmp32
tmp151 = tmp150 - tmp34
tmp152 = tmp151 * tmp149
tmp153 = tmp152 + tmp37
tmp154 = tmp153 * tmp149
tmp155 = tmp154 - tmp40
tmp156 = tmp148 * tmp155
tmp157 = tmp145 + tmp156
tl.store(in_out_ptr1 + (x3), tmp157, xmask)
''', device_str='cuda')
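# Reading aid (editorial sketch, not generated output): the constants above
# (1.25, 2.25, -0.75, 3.75, 6.0, 3.0) are Keys' cubic convolution weights with
# a = -0.75, evaluated at the four taps around each source coordinate. A scalar
# reference (`cubic_weight` is a hypothetical name):
def cubic_weight(t, a=-0.75):
    t = abs(t)
    if t <= 1.0:
        return ((a + 2.0) * t - (a + 3.0)) * t * t + 1.0  # 1.25*t^3 - 2.25*t^2 + 1
    if t < 2.0:
        return ((a * t - 5.0 * a) * t + 8.0 * a) * t - 4.0 * a  # -0.75*t^3 + 3.75*t^2 - 6*t + 3
    return 0.0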
# kernel path: runs/run_shard_4/inductor_cache/f4/cf4q74veoggsxdgdkl43ap6cyqfylpfk3qs7wdqoebyfzzb36dvw.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add_30, %primals_2, %primals_3, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
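# Reading aid (editorial note): the convolutions themselves run through
# extern_kernels.convolution in call(); this kernel and the next one only fuse
# the per-channel bias add and the ReLU in place on the convolution output.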
# kernel path: runs/run_shard_4/inductor_cache/bj/cbjysb56yh4ggfzb72c3xdhbbnmqhfc3pvpexw6rfp2nme2jhyyl.py
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# out_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7x/c7xnwtrjfqhdkxhfsdsjlkr7ml5ojqmtd2lrl7npuiczn7woxe2e.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 256) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (64, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch.float32)
buf18 = buf10; del buf10 # reuse
buf20 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.floor, aten.clamp, aten.rsub, aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0.run(buf20, primals_1, 3072, grid=grid(3072), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_2, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 16, 16), (16384, 256, 16, 1))
buf22 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf22, primals_3, 65536, grid=grid(65536), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf22, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 16, 16), (8192, 256, 16, 1))
buf24 = buf23; del buf23 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf24, primals_5, 32768, grid=grid(32768), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 3, 16, 16), (768, 256, 16, 1))
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf26, primals_7, 3072, grid=grid(3072), stream=stream0)
del primals_7
return (buf26, primals_2, primals_4, primals_6, buf20, buf22, buf24, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((3, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import logging
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmsr".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
class SRCNN(nn.Module):
"""SRCNN network structure for image super resolution.
SRCNN has three conv layers. For each layer, we can define the
`in_channels`, `out_channels` and `kernel_size`.
    The input image is first upsampled with a bicubic upsampler, and then
    super-resolved at the HR spatial size.
Paper: Learning a Deep Convolutional Network for Image Super-Resolution.
Args:
        channels (tuple[int]): A tuple of channel numbers for each layer,
            including the input and output channels. Default: (3, 64, 32, 3).
kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
Default: (9, 1, 5).
upscale_factor (int): Upsampling factor. Default: 4.
"""
def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5),
upscale_factor=4):
super(SRCNN, self).__init__()
assert len(channels
) == 4, f'The length of channel tuple should be 4, but got {len(channels)}'
assert len(kernel_sizes
) == 3, f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}'
self.upscale_factor = upscale_factor
self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor,
mode='bicubic', align_corners=False)
self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size=
kernel_sizes[0], padding=kernel_sizes[0] // 2)
self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size=
kernel_sizes[1], padding=kernel_sizes[1] // 2)
self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size=
kernel_sizes[2], padding=kernel_sizes[2] // 2)
self.relu = nn.ReLU()
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x = self.img_upsampler(x)
out = self.relu(self.conv1(x))
out = self.relu(self.conv2(out))
out = self.conv3(out)
return out
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained
                model. Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass
else:
raise TypeError(
f'"pretrained" must be a str or None. But received {type(pretrained)}.'
)
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return [[], {}]
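# Editorial usage sketch (shapes are illustrative, not from the source): with
# the default upscale_factor=4 the bicubic upsampler sets the output spatial
# size, and all three convolutions are same-padded, so an 8x8 input yields a
# 32x32 output.
def _srcnn_shape_check():
    lr = torch.rand(2, 3, 8, 8)
    net = SRCNN()
    assert net(lr).shape == (2, 3, 32, 32)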
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import logging
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0(
in_out_ptr1, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = 0.25
tmp5 = tmp3 * tmp4
tmp6 = tmp5 - tmp2
tmp7 = libdevice.floor(tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 - tmp9
tmp11 = tl.full([1], 0, tl.int64)
tmp12 = triton_helpers.maximum(tmp10, tmp11)
tmp13 = tl.full([1], 3, tl.int64)
tmp14 = triton_helpers.minimum(tmp12, tmp13)
tmp15 = x0
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 + tmp2
tmp18 = tmp17 * tmp4
tmp19 = tmp18 - tmp2
tmp20 = libdevice.floor(tmp19)
tmp21 = tmp20.to(tl.int32)
tmp22 = tmp21 - tmp9
tmp23 = triton_helpers.maximum(tmp22, tmp11)
tmp24 = triton_helpers.minimum(tmp23, tmp13)
tmp25 = tl.load(in_ptr0 + (tmp24 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp26 = tmp19 - tmp20
tmp27 = 0.0
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = 1.0
tmp30 = triton_helpers.minimum(tmp28, tmp29)
tmp31 = tmp30 + tmp29
tmp32 = -0.75
tmp33 = tmp31 * tmp32
tmp34 = -3.75
tmp35 = tmp33 - tmp34
tmp36 = tmp35 * tmp31
tmp37 = -6.0
tmp38 = tmp36 + tmp37
tmp39 = tmp38 * tmp31
tmp40 = -3.0
tmp41 = tmp39 - tmp40
tmp42 = tmp25 * tmp41
tmp43 = triton_helpers.maximum(tmp21, tmp11)
tmp44 = triton_helpers.minimum(tmp43, tmp13)
tmp45 = tl.load(in_ptr0 + (tmp44 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp46 = 1.25
tmp47 = tmp30 * tmp46
tmp48 = 2.25
tmp49 = tmp47 - tmp48
tmp50 = tmp49 * tmp30
tmp51 = tmp50 * tmp30
tmp52 = tmp51 + tmp29
tmp53 = tmp45 * tmp52
tmp54 = tmp21 + tmp9
tmp55 = triton_helpers.maximum(tmp54, tmp11)
tmp56 = triton_helpers.minimum(tmp55, tmp13)
tmp57 = tl.load(in_ptr0 + (tmp56 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp58 = tmp29 - tmp30
tmp59 = tmp58 * tmp46
tmp60 = tmp59 - tmp48
tmp61 = tmp60 * tmp58
tmp62 = tmp61 * tmp58
tmp63 = tmp62 + tmp29
tmp64 = tmp57 * tmp63
tmp65 = triton_helpers.maximum(tmp8, tmp11)
tmp66 = triton_helpers.minimum(tmp65, tmp13)
tmp67 = tl.load(in_ptr0 + (tmp24 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp68 = tmp67 * tmp41
tmp69 = tl.full([1], 2, tl.int64)
tmp70 = tmp21 + tmp69
tmp71 = triton_helpers.maximum(tmp70, tmp11)
tmp72 = triton_helpers.minimum(tmp71, tmp13)
tmp73 = tl.load(in_ptr0 + (tmp72 + 4 * tmp14 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp74 = 2.0
tmp75 = tmp74 - tmp30
tmp76 = tmp75 * tmp32
tmp77 = tmp76 - tmp34
tmp78 = tmp77 * tmp75
tmp79 = tmp78 + tmp37
tmp80 = tmp79 * tmp75
tmp81 = tmp80 - tmp40
tmp82 = tmp73 * tmp81
tmp83 = tl.load(in_ptr0 + (tmp44 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp84 = tmp83 * tmp52
tmp85 = tl.load(in_ptr0 + (tmp56 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp86 = tmp85 * tmp63
tmp87 = tmp8 + tmp9
tmp88 = triton_helpers.maximum(tmp87, tmp11)
tmp89 = triton_helpers.minimum(tmp88, tmp13)
tmp90 = tl.load(in_ptr0 + (tmp24 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp91 = tmp90 * tmp41
tmp92 = tl.load(in_ptr0 + (tmp72 + 4 * tmp66 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp93 = tmp92 * tmp81
tmp94 = tl.load(in_ptr0 + (tmp44 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp95 = tmp94 * tmp52
tmp96 = tl.load(in_ptr0 + (tmp56 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp97 = tmp96 * tmp63
tmp98 = tmp8 + tmp69
tmp99 = triton_helpers.maximum(tmp98, tmp11)
tmp100 = triton_helpers.minimum(tmp99, tmp13)
tmp101 = tl.load(in_ptr0 + (tmp24 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp102 = tmp101 * tmp41
tmp103 = tl.load(in_ptr0 + (tmp72 + 4 * tmp89 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp104 = tmp103 * tmp81
tmp105 = tl.load(in_ptr0 + (tmp44 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp106 = tmp105 * tmp52
tmp107 = tl.load(in_ptr0 + (tmp56 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp108 = tmp107 * tmp63
tmp109 = tl.load(in_ptr0 + (tmp72 + 4 * tmp100 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp110 = tmp109 * tmp81
tmp111 = tmp42 + tmp53
tmp112 = tmp111 + tmp64
tmp113 = tmp112 + tmp82
tmp114 = tmp6 - tmp7
tmp115 = triton_helpers.maximum(tmp114, tmp27)
tmp116 = triton_helpers.minimum(tmp115, tmp29)
tmp117 = tmp116 + tmp29
tmp118 = tmp117 * tmp32
tmp119 = tmp118 - tmp34
tmp120 = tmp119 * tmp117
tmp121 = tmp120 + tmp37
tmp122 = tmp121 * tmp117
tmp123 = tmp122 - tmp40
tmp124 = tmp113 * tmp123
tmp125 = tmp68 + tmp84
tmp126 = tmp125 + tmp86
tmp127 = tmp126 + tmp93
tmp128 = tmp116 * tmp46
tmp129 = tmp128 - tmp48
tmp130 = tmp129 * tmp116
tmp131 = tmp130 * tmp116
tmp132 = tmp131 + tmp29
tmp133 = tmp127 * tmp132
tmp134 = tmp124 + tmp133
tmp135 = tmp91 + tmp95
tmp136 = tmp135 + tmp97
tmp137 = tmp136 + tmp104
tmp138 = tmp29 - tmp116
tmp139 = tmp138 * tmp46
tmp140 = tmp139 - tmp48
tmp141 = tmp140 * tmp138
tmp142 = tmp141 * tmp138
tmp143 = tmp142 + tmp29
tmp144 = tmp137 * tmp143
tmp145 = tmp134 + tmp144
tmp146 = tmp102 + tmp106
tmp147 = tmp146 + tmp108
tmp148 = tmp147 + tmp110
tmp149 = tmp74 - tmp116
tmp150 = tmp149 * tmp32
tmp151 = tmp150 - tmp34
tmp152 = tmp151 * tmp149
tmp153 = tmp152 + tmp37
tmp154 = tmp153 * tmp149
tmp155 = tmp154 - tmp40
tmp156 = tmp148 * tmp155
tmp157 = tmp145 + tmp156
tl.store(in_out_ptr1 + x3, tmp157, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
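    # Fused conv epilogue: add the per-channel bias (x1 indexes the 64 output
    # channels, each spanning 256 = 16*16 spatial elements) and apply ReLU in
    # place on the convolution output.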
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 256 % 3
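    # Fused epilogue for the final 3-channel conv: bias add only, no activation.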
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (64, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (32, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (3, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 3, 16, 16), (768, 256, 16, 1), torch
.float32)
buf18 = buf10
del buf10
buf20 = buf18
del buf18
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_floor_mul_rsub_sub_0[
grid(3072)](buf20, primals_1, 3072, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_1
buf21 = extern_kernels.convolution(buf20, primals_2, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 64, 16, 16), (16384, 256, 16, 1))
buf22 = buf21
del buf21
triton_poi_fused_convolution_relu_1[grid(65536)](buf22, primals_3,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_3
buf23 = extern_kernels.convolution(buf22, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 32, 16, 16), (8192, 256, 16, 1))
buf24 = buf23
del buf23
triton_poi_fused_convolution_relu_2[grid(32768)](buf24, primals_5,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf25 = extern_kernels.convolution(buf24, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf25, (4, 3, 16, 16), (768, 256, 16, 1))
buf26 = buf25
del buf25
triton_poi_fused_convolution_3[grid(3072)](buf26, primals_7, 3072,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return buf26, primals_2, primals_4, primals_6, buf20, buf22, buf24
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmsr".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
class SRCNNNew(nn.Module):
"""SRCNN network structure for image super resolution.
SRCNN has three conv layers. For each layer, we can define the
`in_channels`, `out_channels` and `kernel_size`.
The input image will first be upsampled with a bicubic upsampler, and then
super-resolved in the HR spatial size.
Paper: Learning a Deep Convolutional Network for Image Super-Resolution.
Args:
channels (tuple[int]): A tuple of channel numbers for each layer
            including channels of input and output. Default: (3, 64, 32, 3).
kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
Default: (9, 1, 5).
upscale_factor (int): Upsampling factor. Default: 4.
"""
def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5),
upscale_factor=4):
super(SRCNNNew, self).__init__()
        assert len(channels) == 4, \
            f'The length of channel tuple should be 4, but got {len(channels)}'
        assert len(kernel_sizes) == 3, \
            f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}'
self.upscale_factor = upscale_factor
self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor,
mode='bicubic', align_corners=False)
self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size=
kernel_sizes[0], padding=kernel_sizes[0] // 2)
self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size=
kernel_sizes[1], padding=kernel_sizes[1] // 2)
self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size=
kernel_sizes[2], padding=kernel_sizes[2] // 2)
self.relu = nn.ReLU()
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass
else:
raise TypeError(
f'"pretrained" must be a str or None. But received {type(pretrained)}.'
)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| rivergold/mmediting | SRCNN | false | 7,579 | [
"Apache-2.0"
] | 1 | fd972635c48bb065db29d1b5090592a87c7263d2 | https://github.com/rivergold/mmediting/tree/fd972635c48bb065db29d1b5090592a87c7263d2 | import logging
import torch
import torch.nn as nn
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmsr".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = get_logger(__name__.split('.')[0], log_file, log_level)
return logger
class Model(nn.Module):
"""SRCNN network structure for image super resolution.
SRCNN has three conv layers. For each layer, we can define the
`in_channels`, `out_channels` and `kernel_size`.
The input image will first be upsampled with a bicubic upsampler, and then
super-resolved in the HR spatial size.
Paper: Learning a Deep Convolutional Network for Image Super-Resolution.
Args:
channels (tuple[int]): A tuple of channel numbers for each layer
            including channels of input and output. Default: (3, 64, 32, 3).
kernel_sizes (tuple[int]): A tuple of kernel sizes for each conv layer.
Default: (9, 1, 5).
upscale_factor (int): Upsampling factor. Default: 4.
"""
def __init__(self, channels=(3, 64, 32, 3), kernel_sizes=(9, 1, 5),
upscale_factor=4):
super().__init__()
        assert len(channels) == 4, \
            f'The length of channel tuple should be 4, but got {len(channels)}'
        assert len(kernel_sizes) == 3, \
            f'The length of kernel tuple should be 3, but got {len(kernel_sizes)}'
self.upscale_factor = upscale_factor
self.img_upsampler = nn.Upsample(scale_factor=self.upscale_factor,
mode='bicubic', align_corners=False)
self.conv1 = nn.Conv2d(channels[0], channels[1], kernel_size=
kernel_sizes[0], padding=kernel_sizes[0] // 2)
self.conv2 = nn.Conv2d(channels[1], channels[2], kernel_size=
kernel_sizes[1], padding=kernel_sizes[1] // 2)
self.conv3 = nn.Conv2d(channels[2], channels[3], kernel_size=
kernel_sizes[2], padding=kernel_sizes[2] // 2)
self.relu = nn.ReLU()
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
x = self.img_upsampler(x)
out = self.relu(self.conv1(x))
out = self.relu(self.conv2(out))
out = self.conv3(out)
return out
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
            strict (bool, optional): Whether to strictly load the pretrained model.
Defaults to True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is None:
pass
else:
raise TypeError(
f'"pretrained" must be a str or None. But received {type(pretrained)}.'
)
def get_inputs():
return [torch.rand([4, 3, 4, 4])]
def get_init_inputs():
return []
|
Swish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/it/citnyysnlw3kfqkdroubinf7gsanznoazbh2vl35ps7sn5fnga5p.py
# Topologically Sorted Source Nodes: [mul, exp, add, swish], Original ATen: [aten.mul, aten.exp, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# exp => exp
# mul => mul
# swish => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, -1.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {})
triton_poi_fused_add_div_exp_mul_0 = async_compile.triton('triton_poi_fused_add_div_exp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_exp_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_exp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = 1.0
tmp5 = tmp3 + tmp4
tmp6 = tmp0 / tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, exp, add, swish], Original ATen: [aten.mul, aten.exp, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_exp_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
from torch import nn
def swish(x, beta=1.0):
"""Swish activation.
'https://arxiv.org/pdf/1710.05941.pdf'
Args:
x: Input tensor.
        beta: Scaling factor inside the sigmoid gate. Default: 1.0.
"""
return SwishOP.apply(x, beta)
class SwishOP(Function):
@staticmethod
def forward(ctx, tensor, beta=1.0):
ctx.save_for_backward(tensor)
ctx.beta = beta
swish = tensor / (1 + torch.exp(-beta * tensor))
return swish
@staticmethod
def backward(ctx, grad_outputs):
tensor = ctx.saved_tensors[0]
beta = ctx.beta
grad_swish = (torch.exp(-beta * tensor) * (1 + beta * tensor) + 1) / (
1 + torch.exp(-beta * tensor)) ** 2
grad_swish = grad_outputs * grad_swish
return grad_swish, None
class Swish(nn.Module):
"""Switch activation from 'SEARCHING FOR ACTIVATION FUNCTIONS'
https://arxiv.org/pdf/1710.05941.pdf
swish = x / (1 + e^-beta*x)
d_swish = (1 + (1+beta*x)) / ((1 + e^-beta*x)^2)
"""
def __init__(self, beta=1.0):
super(Swish, self).__init__()
self.beta = beta
def forward(self, x):
return swish(x, self.beta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
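# Hedged sanity check (illustrative helper, not part of the original repo):
# compare SwishOP's hand-written backward against finite differences.
# gradcheck needs double precision; beta is closed over in the lambda because
# gradcheck only differentiates tensor inputs.
def _check_swish_grad(beta=1.0):
    x = torch.randn(8, dtype=torch.double, requires_grad=True)
    return torch.autograd.gradcheck(lambda t: SwishOP.apply(t, beta), (x,))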
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.autograd import Function
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_exp_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
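    # Elementwise swish with beta traced as the constant 1.0:
    # out = x / (1 + exp(-x)).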
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = 1.0
tmp5 = tmp3 + tmp4
tmp6 = tmp0 / tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_exp_mul_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
def swish(x, beta=1.0):
"""Swish activation.
'https://arxiv.org/pdf/1710.05941.pdf'
Args:
x: Input tensor.
        beta: Scaling factor inside the sigmoid gate. Default: 1.0.
"""
return SwishOP.apply(x, beta)
class SwishOP(Function):
@staticmethod
def forward(ctx, tensor, beta=1.0):
ctx.save_for_backward(tensor)
ctx.beta = beta
swish = tensor / (1 + torch.exp(-beta * tensor))
return swish
@staticmethod
def backward(ctx, grad_outputs):
tensor = ctx.saved_tensors[0]
beta = ctx.beta
grad_swish = (torch.exp(-beta * tensor) * (1 + beta * tensor) + 1) / (
1 + torch.exp(-beta * tensor)) ** 2
grad_swish = grad_outputs * grad_swish
return grad_swish, None
class SwishNew(nn.Module):
"""Switch activation from 'SEARCHING FOR ACTIVATION FUNCTIONS'
https://arxiv.org/pdf/1710.05941.pdf
swish = x / (1 + e^-beta*x)
d_swish = (1 + (1+beta*x)) / ((1 + e^-beta*x)^2)
"""
def __init__(self, beta=1.0):
super(SwishNew, self).__init__()
self.beta = beta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| sailfish009/torch-toolbox | Swish | false | 7,580 | [
"BSD-3-Clause"
] | 1 | 80dfc22c697b9f323e097de72af04f0e5435d7b4 | https://github.com/sailfish009/torch-toolbox/tree/80dfc22c697b9f323e097de72af04f0e5435d7b4 | from torch.autograd import Function
import torch
from torch import nn
def swish(x, beta=1.0):
"""Swish activation.
'https://arxiv.org/pdf/1710.05941.pdf'
Args:
x: Input tensor.
        beta: Scaling factor inside the sigmoid gate. Default: 1.0.
"""
return SwishOP.apply(x, beta)
class SwishOP(Function):
@staticmethod
def forward(ctx, tensor, beta=1.0):
ctx.save_for_backward(tensor)
ctx.beta = beta
swish = tensor / (1 + torch.exp(-beta * tensor))
return swish
@staticmethod
def backward(ctx, grad_outputs):
tensor = ctx.saved_tensors[0]
beta = ctx.beta
grad_swish = (torch.exp(-beta * tensor) * (1 + beta * tensor) + 1) / (
1 + torch.exp(-beta * tensor)) ** 2
grad_swish = grad_outputs * grad_swish
return grad_swish, None
class Model(nn.Module):
"""Switch activation from 'SEARCHING FOR ACTIVATION FUNCTIONS'
https://arxiv.org/pdf/1710.05941.pdf
swish = x / (1 + e^-beta*x)
d_swish = (1 + (1+beta*x)) / ((1 + e^-beta*x)^2)
"""
def __init__(self, beta=1.0):
super().__init__()
self.beta = beta
def forward(self, x):
return swish(x, self.beta)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wb/cwbbl3gjg4pj7nbrdrqfohlucoinxsctai4vhlnlwgnogep26h2v.py
# Topologically Sorted Source Nodes: [hidden1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# hidden1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 24000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1500
x1 = (xindex // 1500)
tmp0 = tl.load(in_out_ptr0 + (x0 + (1504*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + (1504*x1)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/zy/czymijhut34sm34zvz7sbkzzakgydt5s3uaexwtsq6z36symd6ma.py
# Topologically Sorted Source Nodes: [z_scale], Original ATen: [aten.exp]
# Source node to ATen node mapping:
# z_scale => exp
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_exp_1 = async_compile.triton('triton_poi_fused_exp_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl_math.exp(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1500, 16), (16, 1))
assert_size_stride(primals_3, (1500, ), (1, ))
assert_size_stride(primals_4, (1500, 1500), (1500, 1))
assert_size_stride(primals_5, (1500, ), (1, ))
assert_size_stride(primals_6, (30, 1500), (1500, 1))
assert_size_stride(primals_7, (30, ), (1, ))
assert_size_stride(primals_8, (30, 1500), (1500, 1))
assert_size_stride(primals_9, (30, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1500), (1504, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), reinterpret_tensor(primals_2, (16, 1500), (1, 16), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [hidden1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 24000, grid=grid(24000), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((16, 1500), (1504, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1500, 1500), (1, 1500), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [hidden2], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf3, primals_5, 24000, grid=grid(24000), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 30), (30, 1), torch.float32)
# Topologically Sorted Source Nodes: [z_loc], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (1500, 30), (1, 1500), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 30), (30, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (1500, 30), (1, 1500), 0), out=buf5)
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [z_scale], Original ATen: [aten.exp]
triton_poi_fused_exp_1.run(buf6, primals_9, 480, grid=grid(480), stream=stream0)
del primals_9
return (buf4, buf6, reinterpret_tensor(primals_1, (16, 16), (16, 1), 0), buf1, buf3, buf6, primals_8, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1500, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1500, 1500), (1500, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((30, 1500), (1500, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((30, 1500), (1500, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Encoder(nn.Module):
"""
    Takes in data and returns mu and sigma for the variational approximation of the latent variable.
"""
def __init__(self, alph_size, seq_len, z_dim=30, hidden_architecture=[
1500, 1500]):
super(Encoder, self).__init__()
self.hidden1 = nn.Linear(alph_size * seq_len, hidden_architecture[0])
self.hidden2 = nn.Linear(hidden_architecture[0], hidden_architecture[1]
)
self.final1 = nn.Linear(hidden_architecture[1], z_dim)
self.final2 = nn.Linear(hidden_architecture[1], z_dim)
self.relu = nn.ReLU()
self.alph_size = alph_size
self.seq_len = seq_len
def forward(self, x):
x = x.reshape(-1, self.seq_len * self.alph_size)
hidden1 = self.relu(self.hidden1(x))
hidden2 = self.relu(self.hidden2(hidden1))
z_loc = self.final1(hidden2)
z_scale = torch.exp(self.final2(hidden2))
return z_loc, z_scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'alph_size': 4, 'seq_len': 4}]
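# Hedged sketch (illustrative helper, not part of the original repo): the
# encoder parameterizes a diagonal Gaussian, with exp() keeping z_scale
# positive, so a latent sample via the reparameterization trick is:
def _sample_latent(encoder, x):
    z_loc, z_scale = encoder(x)
    return z_loc + z_scale * torch.randn_like(z_scale)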
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 24000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 1500
x1 = xindex // 1500
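    # In-place bias add + ReLU on the matmul output; note the padded row
    # stride of 1504 (1500 rounded up, presumably for alignment) used when
    # addressing the (16, 1500) activation buffer.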
tmp0 = tl.load(in_out_ptr0 + (x0 + 1504 * x1), xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x0 + 1504 * x1), tmp4, xmask)
@triton.jit
def triton_poi_fused_exp_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 480
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 30
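    # Fused epilogue for the scale head: add the final2 bias and exponentiate,
    # so z_scale = exp(linear(hidden2)) is strictly positive.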
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl_math.exp(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1500, 16), (16, 1))
assert_size_stride(primals_3, (1500,), (1,))
assert_size_stride(primals_4, (1500, 1500), (1500, 1))
assert_size_stride(primals_5, (1500,), (1,))
assert_size_stride(primals_6, (30, 1500), (1500, 1))
assert_size_stride(primals_7, (30,), (1,))
assert_size_stride(primals_8, (30, 1500), (1500, 1))
assert_size_stride(primals_9, (30,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 1500), (1504, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 16), (16, 1),
0), reinterpret_tensor(primals_2, (16, 1500), (1, 16), 0), out=buf0
)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(24000)](buf1, primals_3, 24000, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((16, 1500), (1504, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (1500, 1500),
(1, 1500), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_0[grid(24000)](buf3, primals_5, 24000, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 30), (30, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(1500, 30), (1, 1500), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((16, 30), (30, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_8, (1500, 30), (
1, 1500), 0), out=buf5)
buf6 = buf5
del buf5
triton_poi_fused_exp_1[grid(480)](buf6, primals_9, 480, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_9
return buf4, buf6, reinterpret_tensor(primals_1, (16, 16), (16, 1), 0
), buf1, buf3, buf6, primals_8, primals_6, primals_4
class EncoderNew(nn.Module):
"""
    Takes in data and returns mu and sigma for the variational approximation of the latent variable.
"""
def __init__(self, alph_size, seq_len, z_dim=30, hidden_architecture=[
1500, 1500]):
super(EncoderNew, self).__init__()
self.hidden1 = nn.Linear(alph_size * seq_len, hidden_architecture[0])
self.hidden2 = nn.Linear(hidden_architecture[0], hidden_architecture[1]
)
self.final1 = nn.Linear(hidden_architecture[1], z_dim)
self.final2 = nn.Linear(hidden_architecture[1], z_dim)
self.relu = nn.ReLU()
self.alph_size = alph_size
self.seq_len = seq_len
def forward(self, input_0):
primals_2 = self.hidden1.weight
primals_3 = self.hidden1.bias
primals_4 = self.hidden2.weight
primals_5 = self.hidden2.bias
primals_6 = self.final1.weight
primals_7 = self.final1.bias
primals_8 = self.final2.weight
primals_9 = self.final2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| rorymaizels/AC299r | Encoder | false | 7,581 | [
"MIT"
] | 1 | eb4b76ad52a10b9af0579ec3f725ec8fc90b00f1 | https://github.com/rorymaizels/AC299r/tree/eb4b76ad52a10b9af0579ec3f725ec8fc90b00f1 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
    Takes in data and returns mu and sigma for the variational approximation of the latent variable.
"""
def __init__(self, alph_size, seq_len, z_dim=30, hidden_architecture=[
1500, 1500]):
super().__init__()
self.hidden1 = nn.Linear(alph_size * seq_len, hidden_architecture[0])
self.hidden2 = nn.Linear(hidden_architecture[0], hidden_architecture[1]
)
self.final1 = nn.Linear(hidden_architecture[1], z_dim)
self.final2 = nn.Linear(hidden_architecture[1], z_dim)
self.relu = nn.ReLU()
self.alph_size = alph_size
self.seq_len = seq_len
def forward(self, x):
x = x.reshape(-1, self.seq_len * self.alph_size)
hidden1 = self.relu(self.hidden1(x))
hidden2 = self.relu(self.hidden2(hidden1))
z_loc = self.final1(hidden2)
z_scale = torch.exp(self.final2(hidden2))
return z_loc, z_scale
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k3/ck3futvjgnn5ywxhit2wnwjstmaqothiqqhmuc5447rgoovbelmt.py
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, sum_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.sum, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# exp => exp
# exp_1 => exp_1
# exp_2 => exp_3
# invprobs => abs_1, exp_2, full_default, log1p, minimum, neg_4, sub_3
# log => log
# loss => add_2
# loss_1 => mul_4
# max_val => clamp_min
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# neg => neg
# neg_1 => neg_1
# neg_2 => neg_2
# neg_3 => neg_3
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sum_1 => sum_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %neg_3 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 2.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg_3, %sub_2), kwargs = {})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %mul_2), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%mul_2,), kwargs = {})
# %neg_4 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_4,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, 2), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %mul), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%neg, 0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, %clamp_min), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%clamp_min,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %neg_2 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg1_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%neg_2, %clamp_min), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%exp, %exp_1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %log), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp_3, %add_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_4, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp29 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp31 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp56 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp58 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp83 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp85 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = tmp14 * tmp3
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp0 * tmp2
tmp18 = tmp0 - tmp17
tmp19 = triton_helpers.maximum(tmp1, tmp8)
tmp20 = tmp18 + tmp19
tmp21 = -tmp19
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp1 - tmp19
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tl_math.log(tmp25)
tmp27 = tmp20 + tmp26
tmp28 = tmp16 * tmp27
tmp30 = -tmp29
tmp32 = tmp31 * tmp3
tmp33 = tmp32 - tmp5
tmp34 = tmp30 * tmp33
tmp35 = triton_helpers.minimum(tmp8, tmp34)
tmp36 = tl_math.abs(tmp34)
tmp37 = -tmp36
tmp38 = tl_math.exp(tmp37)
tmp39 = libdevice.log1p(tmp38)
tmp40 = tmp35 - tmp39
tmp41 = tmp40 * tmp3
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp29 * tmp31
tmp44 = tmp29 - tmp43
tmp45 = triton_helpers.maximum(tmp30, tmp8)
tmp46 = tmp44 + tmp45
tmp47 = -tmp45
tmp48 = tl_math.exp(tmp47)
tmp49 = tmp30 - tmp45
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp48 + tmp50
tmp52 = tl_math.log(tmp51)
tmp53 = tmp46 + tmp52
tmp54 = tmp42 * tmp53
tmp55 = tmp28 + tmp54
tmp57 = -tmp56
tmp59 = tmp58 * tmp3
tmp60 = tmp59 - tmp5
tmp61 = tmp57 * tmp60
tmp62 = triton_helpers.minimum(tmp8, tmp61)
tmp63 = tl_math.abs(tmp61)
tmp64 = -tmp63
tmp65 = tl_math.exp(tmp64)
tmp66 = libdevice.log1p(tmp65)
tmp67 = tmp62 - tmp66
tmp68 = tmp67 * tmp3
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp56 * tmp58
tmp71 = tmp56 - tmp70
tmp72 = triton_helpers.maximum(tmp57, tmp8)
tmp73 = tmp71 + tmp72
tmp74 = -tmp72
tmp75 = tl_math.exp(tmp74)
tmp76 = tmp57 - tmp72
tmp77 = tl_math.exp(tmp76)
tmp78 = tmp75 + tmp77
tmp79 = tl_math.log(tmp78)
tmp80 = tmp73 + tmp79
tmp81 = tmp69 * tmp80
tmp82 = tmp55 + tmp81
tmp84 = -tmp83
tmp86 = tmp85 * tmp3
tmp87 = tmp86 - tmp5
tmp88 = tmp84 * tmp87
tmp89 = triton_helpers.minimum(tmp8, tmp88)
tmp90 = tl_math.abs(tmp88)
tmp91 = -tmp90
tmp92 = tl_math.exp(tmp91)
tmp93 = libdevice.log1p(tmp92)
tmp94 = tmp89 - tmp93
tmp95 = tmp94 * tmp3
tmp96 = tl_math.exp(tmp95)
tmp97 = tmp83 * tmp85
tmp98 = tmp83 - tmp97
tmp99 = triton_helpers.maximum(tmp84, tmp8)
tmp100 = tmp98 + tmp99
tmp101 = -tmp99
tmp102 = tl_math.exp(tmp101)
tmp103 = tmp84 - tmp99
tmp104 = tl_math.exp(tmp103)
tmp105 = tmp102 + tmp104
tmp106 = tl_math.log(tmp105)
tmp107 = tmp100 + tmp106
tmp108 = tmp96 * tmp107
tmp109 = tmp82 + tmp108
tmp110 = tl.broadcast_to(tmp109, [XBLOCK, RBLOCK])
tmp112 = tl.sum(tmp110, 1)[:, None]
tmp113 = 64.0
tmp114 = tmp112 / tmp113
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp114, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [invprobs, neg_3, mul_1, sub_2, mul_2, mul_3, exp_2, mul, sub, neg, max_val, add, neg_1, exp, neg_2, sub_1, exp_1, add_1, log, loss, loss_1, sum_1, mean], Original ATen: [aten.log_sigmoid_forward, aten.neg, aten.mul, aten.sub, aten.exp, aten.clamp, aten.add, aten.log, aten.sum, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0.run(buf2, arg1_1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
class FocalLoss(torch.nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-
input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.sum(dim=1).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
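# Hedged check (illustrative helper, not part of the original repo): with
# gamma=0 the modulating factor exp(gamma * logsigmoid(...)) equals 1, so the
# per-element loss reduces to the numerically stable BCE-with-logits form
# max(x, 0) - x*t + log(1 + e^(-|x|)).
def _check_gamma_zero_matches_bce():
    x, t = torch.randn(2, 3, 4, 4), torch.rand(2, 3, 4, 4)
    focal = FocalLoss(gamma=0)(x, t)
    bce = F.binary_cross_entropy_with_logits(x, t, reduction='none')
    return torch.allclose(focal, bce.sum(dim=1).mean(), atol=1e-06)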
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp29 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp31 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp56 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp58 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp83 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp85 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
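    # Single-program persistent reduction: the four dim-1 channels are
    # unrolled at offsets 0/16/32/48 over the 64 (batch x spatial) elements.
    # Per element it computes the stable BCE-with-logits term
    # x - x*t + max(-x, 0) + log(e^(-max(-x,0)) + e^(-x - max(-x,0))), weights
    # it by exp(gamma * logsigmoid(-x*(2t - 1))) with gamma = 2 folded in as
    # the 2.0 constant, then sums over dim 1 and averages the 64 results.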
tmp1 = -tmp0
tmp3 = 2.0
tmp4 = tmp2 * tmp3
tmp5 = 1.0
tmp6 = tmp4 - tmp5
tmp7 = tmp1 * tmp6
tmp8 = 0.0
tmp9 = triton_helpers.minimum(tmp8, tmp7)
tmp10 = tl_math.abs(tmp7)
tmp11 = -tmp10
tmp12 = tl_math.exp(tmp11)
tmp13 = libdevice.log1p(tmp12)
tmp14 = tmp9 - tmp13
tmp15 = tmp14 * tmp3
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp0 * tmp2
tmp18 = tmp0 - tmp17
tmp19 = triton_helpers.maximum(tmp1, tmp8)
tmp20 = tmp18 + tmp19
tmp21 = -tmp19
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp1 - tmp19
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tl_math.log(tmp25)
tmp27 = tmp20 + tmp26
tmp28 = tmp16 * tmp27
tmp30 = -tmp29
tmp32 = tmp31 * tmp3
tmp33 = tmp32 - tmp5
tmp34 = tmp30 * tmp33
tmp35 = triton_helpers.minimum(tmp8, tmp34)
tmp36 = tl_math.abs(tmp34)
tmp37 = -tmp36
tmp38 = tl_math.exp(tmp37)
tmp39 = libdevice.log1p(tmp38)
tmp40 = tmp35 - tmp39
tmp41 = tmp40 * tmp3
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp29 * tmp31
tmp44 = tmp29 - tmp43
tmp45 = triton_helpers.maximum(tmp30, tmp8)
tmp46 = tmp44 + tmp45
tmp47 = -tmp45
tmp48 = tl_math.exp(tmp47)
tmp49 = tmp30 - tmp45
tmp50 = tl_math.exp(tmp49)
tmp51 = tmp48 + tmp50
tmp52 = tl_math.log(tmp51)
tmp53 = tmp46 + tmp52
tmp54 = tmp42 * tmp53
tmp55 = tmp28 + tmp54
tmp57 = -tmp56
tmp59 = tmp58 * tmp3
tmp60 = tmp59 - tmp5
tmp61 = tmp57 * tmp60
tmp62 = triton_helpers.minimum(tmp8, tmp61)
tmp63 = tl_math.abs(tmp61)
tmp64 = -tmp63
tmp65 = tl_math.exp(tmp64)
tmp66 = libdevice.log1p(tmp65)
tmp67 = tmp62 - tmp66
tmp68 = tmp67 * tmp3
tmp69 = tl_math.exp(tmp68)
tmp70 = tmp56 * tmp58
tmp71 = tmp56 - tmp70
tmp72 = triton_helpers.maximum(tmp57, tmp8)
tmp73 = tmp71 + tmp72
tmp74 = -tmp72
tmp75 = tl_math.exp(tmp74)
tmp76 = tmp57 - tmp72
tmp77 = tl_math.exp(tmp76)
tmp78 = tmp75 + tmp77
tmp79 = tl_math.log(tmp78)
tmp80 = tmp73 + tmp79
tmp81 = tmp69 * tmp80
tmp82 = tmp55 + tmp81
tmp84 = -tmp83
tmp86 = tmp85 * tmp3
tmp87 = tmp86 - tmp5
tmp88 = tmp84 * tmp87
tmp89 = triton_helpers.minimum(tmp8, tmp88)
tmp90 = tl_math.abs(tmp88)
tmp91 = -tmp90
tmp92 = tl_math.exp(tmp91)
tmp93 = libdevice.log1p(tmp92)
tmp94 = tmp89 - tmp93
tmp95 = tmp94 * tmp3
tmp96 = tl_math.exp(tmp95)
tmp97 = tmp83 * tmp85
tmp98 = tmp83 - tmp97
tmp99 = triton_helpers.maximum(tmp84, tmp8)
tmp100 = tmp98 + tmp99
tmp101 = -tmp99
tmp102 = tl_math.exp(tmp101)
tmp103 = tmp84 - tmp99
tmp104 = tl_math.exp(tmp103)
tmp105 = tmp102 + tmp104
tmp106 = tl_math.log(tmp105)
tmp107 = tmp100 + tmp106
tmp108 = tmp96 * tmp107
tmp109 = tmp82 + tmp108
tmp110 = tl.broadcast_to(tmp109, [XBLOCK, RBLOCK])
tmp112 = tl.sum(tmp110, 1)[:, None]
tmp113 = 64.0
tmp114 = tmp112 / tmp113
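    # 64 elements remain after the dim-1 sum (a 4x4x4 block), so tmp114 is
    # exactly loss.sum(dim=1).mean() from the eager module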
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp114, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_clamp_exp_log_log_sigmoid_forward_mean_mul_neg_sub_sum_0[
grid(1)](buf2, arg1_1, arg0_1, 1, 64, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
return buf2,
class FocalLossNew(torch.nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| rskmoi/kaggle-imet | FocalLoss | false | 7,582 | [
"MIT"
] | 1 | 483e9e6dbae5b1d8e023e0812c4b990afca874bc | https://github.com/rskmoi/kaggle-imet/tree/483e9e6dbae5b1d8e023e0812c4b990afca874bc | import torch
import torch.nn.functional as F
class Model(torch.nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not target.size() == input.size():
raise ValueError(
'Target size ({}) must be the same as input size ({})'.
format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + ((-max_val).exp() + (-
input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.sum(dim=1).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ECToCA3 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hc/chctpdian6xztgdbeufv7ter3vlvtq67azsmwpz2n2wuwalfk3er.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.1618), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 800
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1618
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/q5/cq52p2qap7uob2ddnn4qeh67r3muutkp3yhbkqpu4eqaemol3idl.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_1 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_3,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (800, 4), (4, 1))
assert_size_stride(primals_2, (800, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 800), (800, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 800), (800, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 800), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 800), (12800, 3200, 800, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 800), (12800, 3200, 800, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 51200, grid=grid(51200), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 800), (800, 1), 0), reinterpret_tensor(primals_4, (800, 4), (1, 800), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf4, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 800), (800, 1), 0), buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((800, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((800, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 800), (800, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class ECToCA3(nn.Module):
def __init__(self, D_in, D_out):
super(ECToCA3, self).__init__()
self.fc1 = nn.Linear(D_in, 800)
self.fc2 = nn.Linear(800, D_out)
def forward(self, x):
x = F.leaky_relu(self.fc1(x), 0.1618)
x = torch.sigmoid(self.fc2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'D_in': 4, 'D_out': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 800
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1618
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
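    # adds the fc1 bias and applies leaky_relu with negative slope 0.1618;
    # tmp4 (the pre-activation > 0 mask) is stored separately for backward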
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
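    # fused bias-add + sigmoid, applied in place to the fc2 output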
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (800, 4), (4, 1))
assert_size_stride(primals_2, (800,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 800), (800, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 800), (800, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 800), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 800), (12800, 3200, 800, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 800), (12800, 3200, 800, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(51200)](buf0, primals_2, buf1,
buf2, 51200, XBLOCK=512, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 800), (800, 1), 0),
reinterpret_tensor(primals_4, (800, 4), (1, 800), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_sigmoid_1[grid(256)](buf4, primals_5, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (64, 800), (800, 1), 0
), buf4, primals_4
class ECToCA3New(nn.Module):
def __init__(self, D_in, D_out):
super(ECToCA3New, self).__init__()
self.fc1 = nn.Linear(D_in, 800)
self.fc2 = nn.Linear(800, D_out)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| sachio222/aha4 | ECToCA3 | false | 7,583 | [
"MIT"
] | 1 | ec378fe1bace85e325ad7cb8686b8ba321dc97d0 | https://github.com/sachio222/aha4/tree/ec378fe1bace85e325ad7cb8686b8ba321dc97d0 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, D_in, D_out):
super().__init__()
self.fc1 = nn.Linear(D_in, 800)
self.fc2 = nn.Linear(800, D_out)
def forward(self, x):
x = F.leaky_relu(self.fc1(x), 0.1618)
x = torch.sigmoid(self.fc2(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
n_to_one | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/lq/clqr3avydam6eggbmxm35jlmqgbve6pvevquq2kr6co6qon4pld3.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_4, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [y1], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
# Topologically Sorted Source Nodes: [y2], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_4, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf2, buf1, 49152, grid=grid(49152), stream=stream0)
del buf1
return (buf2, primals_1, primals_2, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class n_to_one(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(3, 3, 1, 1, bias=False)
def forward(self, x1, x2):
y1 = self.conv1(x1)
y2 = self.conv2(x2)
return y1 + y2
def get_inputs():
return [torch.rand([4, 3, 64, 64]), torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
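    # y1 + y2: accumulates the second convolution's output into the first,
    # writing the sum back in place over the first conv's buffer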
tl.store(in_out_ptr0 + x0, tmp2, None)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_4, (4, 3, 64, 64), (12288, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = extern_kernels.convolution(primals_4, primals_3, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf2 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(49152)](buf2, buf1, 49152, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
return buf2, primals_1, primals_2, primals_3, primals_4
class n_to_oneNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(3, 3, 1, 1, bias=False)
def forward(self, input_0, input_1):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| sailfish009/torch-toolbox | n_to_one | false | 7,584 | [
"BSD-3-Clause"
] | 1 | 80dfc22c697b9f323e097de72af04f0e5435d7b4 | https://github.com/sailfish009/torch-toolbox/tree/80dfc22c697b9f323e097de72af04f0e5435d7b4 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 1, 1, bias=False)
self.conv2 = nn.Conv2d(3, 3, 1, 1, bias=False)
def forward(self, x1, x2):
y1 = self.conv1(x1)
y2 = self.conv2(x2)
return y1 + y2
def get_inputs():
return [torch.rand([4, 3, 64, 64]), torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
ActorDDPGNonConvNetwork | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pr/cprthrqz6iotcmrjfcrj7taqntzxisdcjtr54gsuz2ck2kf6kbsr.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + (x0), tmp5, xmask)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fojdylt3acgtptumtowi32d26uvrqtyzefg7ta476ezqj5dtyp.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
# Source node to ATen node mapping:
# x_2 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 1]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*((x0 % 4) // 4)) + (16*(((4*((x0 // 4) % 4)) + (x0 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# output_1 => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_15,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf3 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf4, primals_5, buf8, 64, grid=grid(64), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf4
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (1, 4), (1, 1), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf7, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, buf7, primals_6, buf8, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
from numpy import *
def fanin_init(size, fanin=None):
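    # uniform init in [-1/sqrt(fan_in), 1/sqrt(fan_in)], with fan_in defaulting
    # to size[0]; this is the fan-in rule commonly used in DDPG implementations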
fanin = fanin or size[0]
v = 1.0 / np.sqrt(fanin)
return torch.Tensor(size).uniform_(-v, v)
class ActorDDPGNonConvNetwork(nn.Module):
def __init__(self, num_hidden_layers, output_action, input):
super(ActorDDPGNonConvNetwork, self).__init__()
self.num_hidden_layers = num_hidden_layers
self.input = input
self.output_action = output_action
self.init_w = 0.003
self.dense_1 = nn.Linear(self.input, self.num_hidden_layers)
self.relu1 = nn.ReLU(inplace=True)
self.dense_2 = nn.Linear(self.num_hidden_layers, self.num_hidden_layers
)
self.relu2 = nn.ReLU(inplace=True)
self.output = nn.Linear(self.num_hidden_layers, self.output_action)
self.tanh = nn.Tanh()
def init_weights(self, init_w):
self.dense_1.weight.data = fanin_init(self.dense_1.weight.data.size())
self.dense_2.weight.data = fanin_init(self.dense_2.weight.data.size())
self.output.weight.data.uniform_(-init_w, init_w)
def forward(self, input):
x = self.dense_1(input)
x = self.relu1(x)
x = self.dense_2(x)
x = self.relu2(x)
output = self.output(x)
output = self.tanh(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_hidden_layers': 1, 'output_action': 4, 'input': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
from numpy import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
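    # fused bias-add + ReLU; out_ptr0 receives the (activation <= 0) mask that
    # the threshold_backward node consumes during autograd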
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.full([1], 0, tl.int32)
tmp5 = triton_helpers.maximum(tmp4, tmp3)
tmp6 = 0.0
tmp7 = tmp5 <= tmp6
tl.store(in_out_ptr0 + x0, tmp5, xmask)
tl.store(out_ptr0 + x0, tmp7, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
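    # copy kernel materializing a reshape; for these shapes the index
    # expression below simplifies to the identity (both correction terms are
    # zero), so this is a contiguous copy into a (64, 1) view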
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * (x0 % 4 // 4) + 16 * ((4 * (x0 // 4 %
4) + x0 % 4) // 16)), xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
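    # fused bias-add + tanh, bounding the action output in place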
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 1), (1, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf1,
primals_2, buf9, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused_view_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 1), (1, 1), 0)
del buf1
extern_kernels.mm(buf2, primals_4, out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf3
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(64)](buf4,
primals_5, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused_view_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf4
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_6, (1, 4), (1, 1
), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_tanh_2[grid(256)](buf7, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, buf7, primals_6, buf8, primals_4, buf9
def fanin_init(size, fanin=None):
fanin = fanin or size[0]
v = 1.0 / np.sqrt(fanin)
return torch.Tensor(size).uniform_(-v, v)
class ActorDDPGNonConvNetworkNew(nn.Module):
def __init__(self, num_hidden_layers, output_action, input):
super(ActorDDPGNonConvNetworkNew, self).__init__()
self.num_hidden_layers = num_hidden_layers
self.input = input
self.output_action = output_action
self.init_w = 0.003
self.dense_1 = nn.Linear(self.input, self.num_hidden_layers)
self.relu1 = nn.ReLU(inplace=True)
self.dense_2 = nn.Linear(self.num_hidden_layers, self.num_hidden_layers
)
self.relu2 = nn.ReLU(inplace=True)
self.output = nn.Linear(self.num_hidden_layers, self.output_action)
self.tanh = nn.Tanh()
def init_weights(self, init_w):
self.dense_1.weight.data = fanin_init(self.dense_1.weight.data.size())
self.dense_2.weight.data = fanin_init(self.dense_2.weight.data.size())
self.output.weight.data.uniform_(-init_w, init_w)
def forward(self, input_0):
primals_1 = self.dense_1.weight
primals_2 = self.dense_1.bias
primals_4 = self.dense_2.weight
primals_5 = self.dense_2.bias
primals_6 = self.output.weight
primals_7 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| ruyueshuo/MaskTrackRCNN | ActorDDPGNonConvNetwork | false | 7,585 | [
"Apache-2.0"
] | 1 | 3c6ada36be3c2b2df32176349ec5c0ee5b24e724 | https://github.com/ruyueshuo/MaskTrackRCNN/tree/3c6ada36be3c2b2df32176349ec5c0ee5b24e724 | import torch
import numpy as np
import torch.nn as nn
from numpy import *
def fanin_init(size, fanin=None):
fanin = fanin or size[0]
v = 1.0 / np.sqrt(fanin)
return torch.Tensor(size).uniform_(-v, v)
class Model(nn.Module):
def __init__(self, num_hidden_layers, output_action, input):
super().__init__()
self.num_hidden_layers = num_hidden_layers
self.input = input
self.output_action = output_action
self.init_w = 0.003
self.dense_1 = nn.Linear(self.input, self.num_hidden_layers)
self.relu1 = nn.ReLU(inplace=True)
self.dense_2 = nn.Linear(self.num_hidden_layers, self.num_hidden_layers
)
self.relu2 = nn.ReLU(inplace=True)
self.output = nn.Linear(self.num_hidden_layers, self.output_action)
self.tanh = nn.Tanh()
def init_weights(self, init_w):
self.dense_1.weight.data = fanin_init(self.dense_1.weight.data.size())
self.dense_2.weight.data = fanin_init(self.dense_2.weight.data.size())
self.output.weight.data.uniform_(-init_w, init_w)
def forward(self, input):
x = self.dense_1(input)
x = self.relu1(x)
x = self.dense_2(x)
x = self.relu2(x)
output = self.output(x)
output = self.tanh(output)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [1, 4, 4]
|
CA1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/h6/ch63tcmv5jjqrgkmk2uploq3pouvqozaj2lfxlilno7on236djc3.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.1), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p4/cp4vwepvlgg4xxd354kdb6w34t7xwpzbleyjdc5ornsgajd6cn7u.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.1), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100, ), (1, ))
assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (4, 100), (100, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 100), (400, 400, 100, 1), torch.bool)
buf2 = empty_strided_cuda((4, 1, 4, 100), (400, 1, 100, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 1600, grid=grid(1600), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf3
del primals_5
return (buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (16, 100), (100, 1), 0), buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CA1(nn.Module):
"""Reconstructs the inputs that originated from EC network.
Consists of 2 fully connected layers, recieving inputs from CA3
and outputs to EC.
"""
def __init__(self, N, D_in, D_out, resize_dim):
super(CA1, self).__init__()
self.N, self.resize_dim = N, resize_dim
self.fc1 = nn.Linear(D_in, 100)
self.fc2 = nn.Linear(100, D_out)
nn.init.xavier_uniform_(self.fc1.weight)
def forward(self, x):
x = F.leaky_relu(self.fc1(x), 0.1)
x = F.leaky_relu(self.fc2(x), 0.1)
x = x.view(self.N, 1, self.resize_dim, self.resize_dim)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [[], {'N': 4, 'D_in': 4, 'D_out': 4, 'resize_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 100
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
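    # same fused bias-add + leaky_relu pattern, here with negative slope 0.1;
    # tmp4 again carries the mask needed for the backward pass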
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_4, (4, 100), (100, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 1, 4, 100), (400, 400, 100, 1), torch
.bool)
buf2 = empty_strided_cuda((4, 1, 4, 100), (400, 1, 100, 1), torch.
float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(1600)](buf0, primals_2, buf1,
buf2, 1600, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 100), (100, 1), 0),
reinterpret_tensor(primals_4, (100, 4), (1, 100), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 1, 4, 4), (16, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(64)](buf3, primals_5, buf4, buf5,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf3
del primals_5
return buf5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf1, reinterpret_tensor(buf2, (16, 100), (100, 1), 0
), buf4, primals_4
class CA1New(nn.Module):
"""Reconstructs the inputs that originated from EC network.
Consists of 2 fully connected layers, recieving inputs from CA3
and outputs to EC.
"""
def __init__(self, N, D_in, D_out, resize_dim):
super(CA1New, self).__init__()
self.N, self.resize_dim = N, resize_dim
self.fc1 = nn.Linear(D_in, 100)
self.fc2 = nn.Linear(100, D_out)
nn.init.xavier_uniform_(self.fc1.weight)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| sachio222/aha4 | CA1 | false | 7,586 | [
"MIT"
] | 1 | ec378fe1bace85e325ad7cb8686b8ba321dc97d0 | https://github.com/sachio222/aha4/tree/ec378fe1bace85e325ad7cb8686b8ba321dc97d0 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Reconstructs the inputs that originated from EC network.
Consists of 2 fully connected layers, recieving inputs from CA3
and outputs to EC.
"""
def __init__(self, N, D_in, D_out, resize_dim):
super().__init__()
self.N, self.resize_dim = N, resize_dim
self.fc1 = nn.Linear(D_in, 100)
self.fc2 = nn.Linear(100, D_out)
nn.init.xavier_uniform_(self.fc1.weight)
def forward(self, x):
x = F.leaky_relu(self.fc1(x), 0.1)
x = F.leaky_relu(self.fc2(x), 0.1)
x = x.view(self.N, 1, self.resize_dim, self.resize_dim)
return x
def get_inputs():
return [torch.rand([4, 1, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
Sparsemax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nk/cnkmgeqruamtgccvbn4zkgty33cqveg7s4ow6q4qrojcnzzpb3wy.py
# Topologically Sorted Source Nodes: [max_1, X, sort, cumsum], Original ATen: [aten.max, aten.sub, aten.sort, aten.cumsum]
# Source node to ATen node mapping:
# X => sub
# cumsum => cumsum
# max_1 => max_1
# sort => sort
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %getitem), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%sub, -1, True), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%getitem_2, -1), kwargs = {})
triton_per_fused_cumsum_max_sort_sub_0 = async_compile.triton('triton_per_fused_cumsum_max_sort_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[64, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_max_sort_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_max_sort_sub_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = r1
tmp10 = tmp9.to(tl.int16)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, tmp14, = triton_helpers.sort_with_index(tmp11, tmp12, None, 1, stable=False, descending=True)
tmp15 = tmp13.to(tl.float32)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp8, xmask)
tl.store(out_ptr1 + (r1 + (4*x0)), tmp13, xmask)
tl.store(out_ptr2 + (r1 + (4*x0)), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/uc/cuc2f2cgz6j7wqohouz3uu6ir7263uhsfcodyhv3lka5vxtu4ewc.py
# Topologically Sorted Source Nodes: [mul, topk_cumsum, support, sum_1], Original ATen: [aten.mul, aten.sub, aten.gt, aten.sum]
# Source node to ATen node mapping:
# mul => mul_1
# sum_1 => sum_1
# support => gt
# topk_cumsum => sub_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute, %getitem_2), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum, 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%mul_1, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%gt, [-1]), kwargs = {})
triton_poi_fused_gt_mul_sub_sum_1 = async_compile.triton('triton_poi_fused_gt_mul_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i64', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_mul_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp1 * tmp0
tmp4 = tmp3 - tmp1
tmp5 = tmp2 > tmp4
tmp6 = tmp5.to(tl.int64)
tmp8 = 2.0
tmp9 = tmp8 * tmp7
tmp11 = tmp10 - tmp1
tmp12 = tmp9 > tmp11
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp6 + tmp13
tmp16 = 3.0
tmp17 = tmp16 * tmp15
tmp19 = tmp18 - tmp1
tmp20 = tmp17 > tmp19
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp14 + tmp21
tmp24 = 4.0
tmp25 = tmp24 * tmp23
tmp27 = tmp26 - tmp1
tmp28 = tmp25 > tmp27
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + (x0), tmp30, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/v4/cv4frpxvm4rhejbwxhericho6gutfmihwrgomlmqytuxrptdu7cn.py
# Topologically Sorted Source Nodes: [topk_cumsum, sub_2, tau, tau_1, sub_3, output], Original ATen: [aten.sub, aten.gather, aten.div, aten.clamp]
# Source node to ATen node mapping:
# output => clamp_min
# sub_2 => sub_2
# sub_3 => sub_3
# tau => gather
# tau_1 => div
# topk_cumsum => sub_1
# Graph fragment:
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%cumsum, 1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, 1), kwargs = {})
# %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%sub_1, -1, %sub_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%gather, %unsqueeze), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %div), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
triton_poi_fused_clamp_div_gather_sub_2 = async_compile.triton('triton_poi_fused_clamp_div_gather_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_gather_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_gather_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp1 - tmp2
tmp4 = tl.full([XBLOCK], 4, tl.int32)
tmp5 = tmp3 + tmp4
tmp6 = tmp3 < 0
tmp7 = tl.where(tmp6, tmp5, tmp3)
tl.device_assert(((0 <= tmp7) & (tmp7 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp7 < 4")
tmp9 = tl.load(in_ptr2 + (tmp7 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = 1.0
tmp11 = tmp9 - tmp10
tmp12 = tmp1.to(tl.float32)
tmp13 = tmp11 / tmp12
tmp14 = tmp0 - tmp13
tmp15 = 0.0
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, X, sort, cumsum], Original ATen: [aten.max, aten.sub, aten.sort, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused_cumsum_max_sort_sub_0.run(arg0_1, buf0, buf1, buf3, 64, 4, grid=grid(64), stream=stream0)
del arg0_1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [mul, topk_cumsum, support, sum_1], Original ATen: [aten.mul, aten.sub, aten.gt, aten.sum]
triton_poi_fused_gt_mul_sub_sum_1.run(buf1, buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [topk_cumsum, sub_2, tau, tau_1, sub_3, output], Original ATen: [aten.sub, aten.gather, aten.div, aten.clamp]
triton_poi_fused_clamp_div_gather_sub_2.run(buf0, buf4, buf3, buf5, 256, grid=grid(256), stream=stream0)
del buf0
del buf3
del buf4
return (buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
elif dim < 0:
        dim = X.dim() + dim  # map a negative dim to its positive index
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _sparsemax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for sparsemax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
support_size : torch LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]:
topk, _ = torch.sort(X, dim=dim, descending=True)
else:
topk, _ = torch.topk(X, k=k, dim=dim)
topk_cumsum = topk.cumsum(dim) - 1
rhos = _make_ix_like(topk, dim)
support = rhos * topk > topk_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = topk_cumsum.gather(dim, support_size - 1)
tau /= support_size
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
in_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)
_roll_last(tau, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau, support_size
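def _example_threshold_and_support():
    # Hedged worked example (hypothetical helper, not in the original repo):
    # for x = [0.5, 1.5, 0.1], the descending sort gives [1.5, 0.5, 0.1],
    # cumsum - 1 = [0.5, 1.0, 1.1], and rho * sorted = [1.5, 1.0, 0.3]; only
    # the first position satisfies rho * sorted > cumsum - 1, so
    # support_size = 1 and tau = 0.5.
    x = torch.tensor([[0.5, 1.5, 0.1]])
    tau, support_size = _sparsemax_threshold_and_support(x, dim=-1)
    assert support_size.item() == 1
    assert abs(tau.item() - 0.5) < 1e-6
    return tau, support_size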
def sparsemax(X, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxFunction.apply(X, dim, k)
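def _example_sparsemax_simplex():
    # Hedged usage sketch (hypothetical helper, not in the original repo):
    # sparsemax projects each row onto the probability simplex, so rows sum
    # to 1, entries are nonnegative, and outputs typically contain exact zeros.
    P = sparsemax(torch.randn(3, 5), dim=-1)
    assert torch.allclose(P.sum(dim=-1), torch.ones(3), atol=1e-6)
    assert (P >= 0).all()
    return P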
class SparsemaxFunction(Function):
@classmethod
def forward(cls, ctx, X, dim=-1, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val
tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k)
output = torch.clamp(X - tau, min=0)
ctx.save_for_backward(supp_size, output)
return output
@classmethod
def backward(cls, ctx, grad_output):
supp_size, output = ctx.saved_tensors
dim = ctx.dim
grad_input = grad_output.clone()
grad_input[output == 0] = 0
v_hat = grad_input.sum(dim=dim) / supp_size.squeeze()
v_hat = v_hat.unsqueeze(dim)
grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
return grad_input, None, None
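# Note on the backward above: with support S = {j : output_j > 0}, the
# sparsemax Jacobian-vector product is
#     dX_j = dY_j - (sum_{i in S} dY_i) / |S|   for j in S,   dX_j = 0 otherwise,
# which is exactly the masked-mean subtraction implemented via v_hat.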
class Sparsemax(nn.Module):
def __init__(self, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
        super(Sparsemax, self).__init__()
        self.dim = dim
        self.k = k
def forward(self, X):
return sparsemax(X, dim=self.dim, k=self.k)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
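# Per-row fused kernel: subtracts the row max, sorts the 4 values descending,
# and running-sums the sorted copy. Writes the shifted input (out_ptr0), the
# sorted values (out_ptr1), and their cumulative sum (out_ptr2).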
@triton.jit
def triton_per_fused_cumsum_max_sort_sub_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 64
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = r1
tmp10 = tmp9.to(tl.int16)
tmp11 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp12 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13, _tmp14 = triton_helpers.sort_with_index(tmp11, tmp12, None, 1,
stable=False, descending=True)
tmp15 = tmp13.to(tl.float32)
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp17, = tl.associative_scan((tmp16,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp8, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp13, xmask)
tl.store(out_ptr2 + (r1 + 4 * x0), tmp17, xmask)
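# Computes the sparsemax support size per row: counts positions where
# rho * sorted_value > cumsum - 1, with the rho = 1..4 loop fully unrolled
# into scalar constants (in_ptr0 = sorted values, in_ptr1 = their cumsum).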
@triton.jit
def triton_poi_fused_gt_mul_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp1 * tmp0
tmp4 = tmp3 - tmp1
tmp5 = tmp2 > tmp4
tmp6 = tmp5.to(tl.int64)
tmp8 = 2.0
tmp9 = tmp8 * tmp7
tmp11 = tmp10 - tmp1
tmp12 = tmp9 > tmp11
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp6 + tmp13
tmp16 = 3.0
tmp17 = tmp16 * tmp15
tmp19 = tmp18 - tmp1
tmp20 = tmp17 > tmp19
tmp21 = tmp20.to(tl.int64)
tmp22 = tmp14 + tmp21
tmp24 = 4.0
tmp25 = tmp24 * tmp23
tmp27 = tmp26 - tmp1
tmp28 = tmp25 > tmp27
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + x0, tmp30, xmask)
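# Gathers tau = (cumsum - 1)[support_size - 1] / support_size for each row
# (with a bounds-checked, wrap-around index) and writes the final sparsemax
# output clamp(shifted_input - tau, 0).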
@triton.jit
def triton_poi_fused_clamp_div_gather_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.full([1], 1, tl.int64)
tmp3 = tmp1 - tmp2
tmp4 = tl.full([XBLOCK], 4, tl.int32)
tmp5 = tmp3 + tmp4
tmp6 = tmp3 < 0
tmp7 = tl.where(tmp6, tmp5, tmp3)
tl.device_assert((0 <= tmp7) & (tmp7 < 4) | ~xmask,
'index out of bounds: 0 <= tmp7 < 4')
tmp9 = tl.load(in_ptr2 + (tmp7 + 4 * x1), xmask, eviction_policy=
'evict_last')
tmp10 = 1.0
tmp11 = tmp9 - tmp10
tmp12 = tmp1.to(tl.float32)
tmp13 = tmp11 / tmp12
tmp14 = tmp0 - tmp13
tmp15 = 0.0
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_max_sort_sub_0[grid(64)](arg0_1, buf0, buf1,
buf3, 64, 4, XBLOCK=32, num_warps=2, num_stages=1)
del arg0_1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.int64)
triton_poi_fused_gt_mul_sub_sum_1[grid(64)](buf1, buf3, buf4, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf5 = buf1
del buf1
triton_poi_fused_clamp_div_gather_sub_2[grid(256)](buf0, buf4, buf3,
buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf3
del buf4
return buf5,
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
elif dim < 0:
        dim = X.dim() + dim  # map a negative dim to its positive index
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _sparsemax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for sparsemax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
support_size : torch LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]:
topk, _ = torch.sort(X, dim=dim, descending=True)
else:
topk, _ = torch.topk(X, k=k, dim=dim)
topk_cumsum = topk.cumsum(dim) - 1
rhos = _make_ix_like(topk, dim)
support = rhos * topk > topk_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = topk_cumsum.gather(dim, support_size - 1)
tau /= support_size
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
in_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)
_roll_last(tau, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau, support_size
def sparsemax(X, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxFunction.apply(X, dim, k)
class SparsemaxFunction(Function):
@classmethod
def forward(cls, ctx, X, dim=-1, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val
tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k)
output = torch.clamp(X - tau, min=0)
ctx.save_for_backward(supp_size, output)
return output
@classmethod
def backward(cls, ctx, grad_output):
supp_size, output = ctx.saved_tensors
dim = ctx.dim
grad_input = grad_output.clone()
grad_input[output == 0] = 0
v_hat = grad_input.sum(dim=dim) / supp_size.squeeze()
v_hat = v_hat.unsqueeze(dim)
grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
return grad_input, None, None
class SparsemaxNew(nn.Module):
def __init__(self, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
"""
        super(SparsemaxNew, self).__init__()
        self.dim = dim
        self.k = k
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| roholazandie/entmax | Sparsemax | false | 7,587 | [
"MIT"
] | 1 | 657374e6a792ec6840b6f78bc759cc1f51570aad | https://github.com/roholazandie/entmax/tree/657374e6a792ec6840b6f78bc759cc1f51570aad | from torch.autograd import Function
import torch
import torch.nn as nn
def _make_ix_like(X, dim):
d = X.size(dim)
rho = torch.arange(1, d + 1, device=X.device, dtype=X.dtype)
view = [1] * X.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _roll_last(X, dim):
if dim == -1:
return X
elif dim < 0:
        dim = X.dim() + dim  # map a negative dim to its positive index
perm = [i for i in range(X.dim()) if i != dim] + [dim]
return X.permute(perm)
def _sparsemax_threshold_and_support(X, dim=-1, k=None):
"""Core computation for sparsemax: optimal threshold and support size.
Parameters
----------
X : torch.Tensor
The input tensor to compute thresholds over.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
tau : torch.Tensor like `X`, with all but the `dim` dimension intact
the threshold value for each vector
support_size : torch LongTensor, shape like `tau`
the number of nonzeros in each vector.
"""
if k is None or k >= X.shape[dim]:
topk, _ = torch.sort(X, dim=dim, descending=True)
else:
topk, _ = torch.topk(X, k=k, dim=dim)
topk_cumsum = topk.cumsum(dim) - 1
rhos = _make_ix_like(topk, dim)
support = rhos * topk > topk_cumsum
support_size = support.sum(dim=dim).unsqueeze(dim)
tau = topk_cumsum.gather(dim, support_size - 1)
tau /= support_size
if k is not None and k < X.shape[dim]:
unsolved = (support_size == k).squeeze(dim)
if torch.any(unsolved):
in_ = _roll_last(X, dim)[unsolved]
tau_, ss_ = _sparsemax_threshold_and_support(in_, dim=-1, k=2 * k)
_roll_last(tau, dim)[unsolved] = tau_
_roll_last(support_size, dim)[unsolved] = ss_
return tau, support_size
def sparsemax(X, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a la softmax).
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
k : int or None
number of largest elements to partial-sort over. For optimal
performance, should be slightly bigger than the expected number of
nonzeros in the solution. If the solution is more than k-sparse,
this function is recursively called with a 2*k schedule.
If `None`, full sorting is performed from the beginning.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxFunction.apply(X, dim, k)
class SparsemaxFunction(Function):
@classmethod
def forward(cls, ctx, X, dim=-1, k=None):
ctx.dim = dim
max_val, _ = X.max(dim=dim, keepdim=True)
X = X - max_val
tau, supp_size = _sparsemax_threshold_and_support(X, dim=dim, k=k)
output = torch.clamp(X - tau, min=0)
ctx.save_for_backward(supp_size, output)
return output
@classmethod
def backward(cls, ctx, grad_output):
supp_size, output = ctx.saved_tensors
dim = ctx.dim
grad_input = grad_output.clone()
grad_input[output == 0] = 0
v_hat = grad_input.sum(dim=dim) / supp_size.squeeze()
v_hat = v_hat.unsqueeze(dim)
grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
return grad_input, None, None
class Model(nn.Module):
def __init__(self, dim=-1, k=None):
"""sparsemax: normalizing sparse transform (a l
# ... truncated (>4000 chars) for memory efficiency |
L0Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xs/cxsn7ndoxhfjkrfmkyd7usdf7apmgkruojcr5twggwofsfe7vmoh.py
# Topologically Sorted Source Nodes: [sub, abs_1, add, loss, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.pow, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# add => add
# loss => pow_1
# mean => mean
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1e-08), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_abs_add_mean_pow_sub_0 = async_compile.triton('triton_per_fused_abs_add_mean_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, abs_1, add, loss, mean], Original ATen: [aten.sub, aten.abs, aten.add, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_mean_pow_sub_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class L0Loss(nn.Module):
"""L0loss from
"Noise2Noise: Learning Image Restoration without Clean Data"
<https://arxiv.org/pdf/1803.04189>`_ paper.
"""
def __init__(self, gamma=2, eps=1e-08):
super(L0Loss, self).__init__()
self.gamma = gamma
self.eps = eps
def forward(self, pred, target):
loss = (torch.abs(pred - target) + self.eps).pow(self.gamma)
return torch.mean(loss)
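def _example_l0loss():
    # Hedged usage sketch (hypothetical helper, not in the original repo):
    # with the default gamma=2 the loss is mean((|pred - target| + eps) ** 2),
    # i.e. essentially MSE as eps -> 0.
    crit = L0Loss()
    loss = crit(torch.zeros(2, 2), torch.ones(2, 2))
    assert abs(loss.item() - 1.0) < 1e-6
    return loss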
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
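# Single-block fused reduction: computes mean((|pred - target| + 1e-08) ** 2)
# over all 256 elements; gamma=2 and eps were constant-folded into the kernel
# at compile time.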
@triton.jit
def triton_per_fused_abs_add_mean_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1e-08
tmp5 = tmp3 + tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = 256.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_mean_pow_sub_0[grid(1)](buf1, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class L0LossNew(nn.Module):
"""L0loss from
"Noise2Noise: Learning Image Restoration without Clean Data"
<https://arxiv.org/pdf/1803.04189>`_ paper.
"""
def __init__(self, gamma=2, eps=1e-08):
super(L0LossNew, self).__init__()
self.gamma = gamma
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| sailfish009/torch-toolbox | L0Loss | false | 7,588 | [
"BSD-3-Clause"
] | 1 | 80dfc22c697b9f323e097de72af04f0e5435d7b4 | https://github.com/sailfish009/torch-toolbox/tree/80dfc22c697b9f323e097de72af04f0e5435d7b4 | import torch
from torch import nn
class Model(nn.Module):
"""L0loss from
"Noise2Noise: Learning Image Restoration without Clean Data"
<https://arxiv.org/pdf/1803.04189>`_ paper.
"""
def __init__(self, gamma=2, eps=1e-08):
super().__init__()
self.gamma = gamma
self.eps = eps
def forward(self, pred, target):
loss = (torch.abs(pred - target) + self.eps).pow(self.gamma)
return torch.mean(loss)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BertSelfOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2j/c2j3iqfivxp4slgdp5wwpyv7egzeitk5o2kiyeptosrthyamlxof.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_2 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
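def _example_tf_style_layernorm():
    # Hedged sketch (hypothetical helper, not in the original repo): with a
    # biased variance and eps inside the square root, this module should match
    # torch's built-in layer_norm up to floating-point error.
    ln = BertLayerNorm(_mock_config(hidden_size=4))
    x = torch.rand(2, 4)
    ref = nn.functional.layer_norm(x, (4,), ln.gamma, ln.beta, eps=1e-12)
    assert torch.allclose(ln(x), ref, atol=1e-6)
    return ref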
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
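def _example_usage():
    # Hypothetical usage sketch (not part of the original repo): it wires up
    # BertSelfOutput with the same mock config that get_init_inputs() builds
    # below. `_mock_config` is assumed to act as a plain attribute container.
    config = _mock_config(hidden_size=4, hidden_dropout_prob=0.5)
    layer = BertSelfOutput(config)
    hidden_states = torch.rand(4, 4, 4, 4)
    input_tensor = torch.rand(4, 4, 4, 4)
    return layer(hidden_states, input_tensor)  # shape: (4, 4, 4, 4)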
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4,
        hidden_dropout_prob=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + 2)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + 3)
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-12
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_6
    return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class BertSelfOutputNew(nn.Module):
def __init__(self, config):
super(BertSelfOutputNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.gamma
primals_6 = self.LayerNorm.beta
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
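def _example_usage_compiled():
    # Hypothetical usage sketch (assumes a CUDA device): BertSelfOutputNew is
    # a drop-in replacement for the eager BertSelfOutput above; note that the
    # traced call() contains no dropout node, so it runs inference-style.
    from _paritybench_helpers import _mock_config  # assumed available
    config = _mock_config(hidden_size=4, hidden_dropout_prob=0.5)
    module = BertSelfOutputNew(config).cuda()
    hidden_states = torch.rand(4, 4, 4, 4, device='cuda')
    input_tensor = torch.rand(4, 4, 4, 4, device='cuda')
    return module(hidden_states, input_tensor)  # shape: (4, 4, 4, 4)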
| BLimmie/pytorch-pretrained-BERT | BertSelfOutput | false | 7,589 | [
"Apache-2.0"
] | 1 | 2ac4b29641e569020ed2acc28016f481f617052b | https://github.com/BLimmie/pytorch-pretrained-BERT/tree/2ac4b29641e569020ed2acc28016f481f617052b | from _paritybench_helpers import _mock_config
import torch
from torch import nn
class BertLayerNorm(nn.Module):
def __init__(self, config, variance_epsilon=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super().__init__()
self.gamma = nn.Parameter(torch.ones(config.hidden_size))
self.beta = nn.Parameter(torch.zeros(config.hidden_size))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
|
LossPredictionLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/no/cno5volk73zd2bft3tqji7bjbkwnhv7zy2scykviixz5rie5viyd.py
# Topologically Sorted Source Nodes: [clamp, sign, mul, one, mul_1, sub_3, clamp_1, loss, loss_1], Original ATen: [aten.clamp, aten.sign, aten.mul, aten.sub, aten.rsub, aten.sum, aten.div]
# Source node to ATen node mapping:
# clamp => clamp_min
# clamp_1 => clamp_min_1
# loss => sum_1
# loss_1 => div
# mul => mul
# mul_1 => mul_1
# one => sub_2
# sign => sign
# sub_3 => sub_3
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%slice_2, 0), kwargs = {})
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%clamp_min,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sign, 2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %slice_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 2), kwargs = {})
triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0 = async_compile.triton('triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 128],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 128
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
r1 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.load(in_ptr0 + (192 + r0 + ((-64)*r1)), None)
tmp16 = tl.load(in_ptr1 + (r2), None)
tmp17 = tl.load(in_ptr1 + (192 + r0 + ((-64)*r1)), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp5 < tmp4
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp4 < tmp5
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp7 - tmp9
tmp11 = tmp10.to(tmp4.dtype)
tmp12 = 2.0
tmp13 = tmp11 * tmp12
tmp14 = 1.0
tmp15 = tmp13 - tmp14
tmp18 = tmp16 - tmp17
tmp19 = tmp15 * tmp18
tmp20 = tmp14 - tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [clamp, sign, mul, one, mul_1, sub_3, clamp_1, loss, loss_1], Original ATen: [aten.clamp, aten.sign, aten.mul, aten.sub, aten.rsub, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0.run(buf1, arg1_1, arg0_1, 1, 128, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LossPredictionLoss(nn.Module):
def __init__(self, margin=1.0):
super(LossPredictionLoss, self).__init__()
self.margin = margin
def forward(self, input, target):
input = (input - input.flip(0))[:len(input) // 2]
target = (target - target.flip(0))[:len(target) // 2]
target = target.detach()
one = 2 * torch.sign(torch.clamp(target, min=0)) - 1
loss = torch.sum(torch.clamp(self.margin - one * input, min=0))
loss = loss / input.size(0)
return loss
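def _example_usage():
    # Hypothetical usage sketch (not part of the original repo): the loss
    # pairs each sample with its mirror under flip(0), so the batch size
    # should be even; inputs here are per-sample loss predictions/targets.
    criterion = LossPredictionLoss(margin=1.0)
    predicted_loss = torch.rand(8, 1)
    true_loss = torch.rand(8, 1)
    return criterion(predicted_loss, true_loss)  # scalar hinge-style loss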
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 128
xoffset = tl.program_id(0) * XBLOCK
    # The index/mask expressions below are emitted by the decompiled kernel
    # but their results are discarded: a single program instance covers the
    # whole 128-element reduction, so no mask is actually needed.
    xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r0 = rindex % 64
r1 = rindex // 64
tmp0 = tl.load(in_ptr0 + r2, None)
    tmp1 = tl.load(in_ptr0 + (192 + r0 - 64 * r1), None)
    tmp16 = tl.load(in_ptr1 + r2, None)
    tmp17 = tl.load(in_ptr1 + (192 + r0 - 64 * r1), None)
tmp2 = tmp0 - tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = tl.full([1, 1], 0, tl.int32)
tmp6 = tmp5 < tmp4
tmp7 = tmp6.to(tl.int8)
tmp8 = tmp4 < tmp5
tmp9 = tmp8.to(tl.int8)
tmp10 = tmp7 - tmp9
tmp11 = tmp10.to(tmp4.dtype)
tmp12 = 2.0
tmp13 = tmp11 * tmp12
tmp14 = 1.0
tmp15 = tmp13 - tmp14
tmp18 = tmp16 - tmp17
tmp19 = tmp15 * tmp18
tmp20 = tmp14 - tmp19
tmp21 = triton_helpers.maximum(tmp20, tmp3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK, RBLOCK])
tmp24 = tl.sum(tmp22, 1)[:, None]
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_clamp_div_mul_rsub_sign_sub_sum_0[grid(1)](buf1,
arg1_1, arg0_1, 1, 128, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LossPredictionLossNew(nn.Module):
def __init__(self, margin=1.0):
super(LossPredictionLossNew, self).__init__()
self.margin = margin
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
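def _example_usage_compiled():
    # Hypothetical usage sketch (assumes a CUDA device): the Triton kernel is
    # specialized to contiguous (4, 4, 4, 4) inputs, and it hardcodes the
    # margin as 1.0 regardless of the constructor argument.
    criterion = LossPredictionLossNew(margin=1.0)
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    return criterion(pred, target)  # 0-dim tensor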
| saksman/deepfish_adaptation | LossPredictionLoss | false | 7,590 | [
"MIT"
] | 1 | 0413def87ec1d3cb67fa043a2fb60ef7e0d73539 | https://github.com/saksman/deepfish_adaptation/tree/0413def87ec1d3cb67fa043a2fb60ef7e0d73539 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, margin=1.0):
super().__init__()
self.margin = margin
def forward(self, input, target):
input = (input - input.flip(0))[:len(input) // 2]
target = (target - target.flip(0))[:len(target) // 2]
target = target.detach()
one = 2 * torch.sign(torch.clamp(target, min=0)) - 1
loss = torch.sum(torch.clamp(self.margin - one * input, min=0))
loss = loss / input.size(0)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SmallMaskNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zj/czju3ep7df3m7twyv4etkwwr67tqpgia5pggjumz5hb763zuye2s.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xb/cxbwrbrs4csuyzfi5yt2te7ub7aqn4kjp5ikscfm7ukmq7wxeovu.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.1), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 16
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 2048, grid=grid(2048), stream=stream0)
del buf0
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
buf4 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 1024, grid=grid(1024), stream=stream0)
del buf3
del primals_5
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf7, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2, buf4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SmallMaskNet(nn.Module):
"""A three-layer network for predicting mask"""
def __init__(self, input, output):
super(SmallMaskNet, self).__init__()
self.conv1 = nn.Conv2d(input, 32, 5, padding=2)
self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, output, 3, padding=1)
def forward(self, x):
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
x = self.conv3(x)
return x
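def _example_usage():
    # Hypothetical usage sketch (not part of the original repo): with
    # input=4 and output=4, the network maps a 4-channel image to a
    # 4-channel mask; the paddings keep the spatial size unchanged.
    net = SmallMaskNet(input=4, output=4)
    x = torch.rand(4, 4, 4, 4)
    return net(x)  # shape: (4, 4, 4, 4)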
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input': 4, 'output': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # unused mask retained from the generated kernel
x3 = xindex
x1 = xindex // 16 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (4, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 4, 4), (512, 16, 4, 1))
buf1 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 32, 4, 4), (512, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(2048)](buf0,
primals_2, buf1, buf2, 2048, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 16, 4, 4), (256, 16, 4, 1))
buf4 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.bool)
        buf5 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_leaky_relu_1[grid(1024)](buf3,
primals_5, buf4, buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(256)](buf7, primals_7, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
return (buf7, primals_1, primals_3, primals_4, primals_6, buf1, buf2,
buf4, buf5)
class SmallMaskNetNew(nn.Module):
"""A three-layer network for predicting mask"""
def __init__(self, input, output):
super(SmallMaskNetNew, self).__init__()
self.conv1 = nn.Conv2d(input, 32, 5, padding=2)
self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, output, 3, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
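def _example_usage_compiled():
    # Hypothetical usage sketch (assumes a CUDA device): convolutions run
    # through extern_kernels.convolution while each bias-add + LeakyReLU pair
    # is fused into a Triton kernel; call() asserts the (4, 4, 4, 4) input.
    net = SmallMaskNetNew(input=4, output=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return net(x)  # shape: (4, 4, 4, 4)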
| saikatdutta/NME-VFI | SmallMaskNet | false | 7,591 | [
"Apache-2.0"
] | 1 | 5915e2336ea3ed7113a9c6a91bbc7f6b5deaac17 | https://github.com/saikatdutta/NME-VFI/tree/5915e2336ea3ed7113a9c6a91bbc7f6b5deaac17 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""A three-layer network for predicting mask"""
def __init__(self, input, output):
super().__init__()
self.conv1 = nn.Conv2d(input, 32, 5, padding=2)
self.conv2 = nn.Conv2d(32, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, output, 3, padding=1)
def forward(self, x):
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
x = self.conv3(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ContinuousEmbeddings | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/qt/cqtq5mmknxrvgzjk2sifabpjul5xesssk35wqhyxwjtasnoyfjil.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# x => mul
# x_1 => add
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %unsqueeze_2), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_3
return (buf0, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
def _get_activation_fn(activation):
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu':
return nn.LeakyReLU(inplace=True)
elif activation == 'gelu':
return nn.GELU()
elif activation == 'geglu':
return GEGLU()
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
class ContinuousEmbeddings(nn.Module):
    def __init__(self, n_cont_cols: 'int', embed_dim: 'int',
        activation: 'str'=None, bias: 'bool'=True):
super(ContinuousEmbeddings, self).__init__()
self.n_cont_cols = n_cont_cols
self.embed_dim = embed_dim
self.activation = activation
self.weight = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim))
        self.bias = (nn.Parameter(torch.Tensor(n_cont_cols, embed_dim))
            if bias else None)
self._reset_parameters()
self.act_fn = _get_activation_fn(activation) if activation else None
def _reset_parameters(self) ->None:
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: 'Tensor') ->Tensor:
x = self.weight.unsqueeze(0) * X.unsqueeze(2)
if self.bias is not None:
x = x + self.bias.unsqueeze(0)
if self.act_fn is not None:
x = self.act_fn(x)
return x
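def _example_usage():
    # Hypothetical usage sketch (not part of the original repo): each of the
    # n_cont_cols continuous features gets its own (weight, bias) pair, so a
    # (batch, n_cont_cols) input becomes (batch, n_cont_cols, embed_dim).
    emb = ContinuousEmbeddings(n_cont_cols=4, embed_dim=4)
    X = torch.rand(2, 4)
    return emb(X)  # shape: (2, 4, 4)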
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_cont_cols': 4, 'embed_dim': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4, 4), (64, 16, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(256)](primals_1, primals_2,
primals_3, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_3
return buf0, primals_2
def _get_activation_fn(activation):
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu':
return nn.LeakyReLU(inplace=True)
elif activation == 'gelu':
return nn.GELU()
elif activation == 'geglu':
return GEGLU()
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
class ContinuousEmbeddingsNew(nn.Module):
    def __init__(self, n_cont_cols: 'int', embed_dim: 'int',
        activation: 'str'=None, bias: 'bool'=True):
super(ContinuousEmbeddingsNew, self).__init__()
self.n_cont_cols = n_cont_cols
self.embed_dim = embed_dim
self.activation = activation
self.weight = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim))
        self.bias = (nn.Parameter(torch.Tensor(n_cont_cols, embed_dim))
            if bias else None)
self._reset_parameters()
self.act_fn = _get_activation_fn(activation) if activation else None
def _reset_parameters(self) ->None:
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
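def _example_usage_compiled():
    # Hypothetical usage sketch (assumes a CUDA device): the fused Triton
    # kernel evaluates weight.unsqueeze(0) * X.unsqueeze(2) + bias in one
    # pass; this call() is specialized to the (4, 4, 4, 4) benchmark input.
    emb = ContinuousEmbeddingsNew(n_cont_cols=4, embed_dim=4).cuda()
    X = torch.rand(4, 4, 4, 4, device='cuda')
    return emb(X)  # shape: (4, 4, 1, 4, 4)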
| sallypannn/pytorch-widedeep | ContinuousEmbeddings | false | 7,592 | [
"MIT"
] | 1 | ab4a209a2a3bff539f543a66ac51306042ed6693 | https://github.com/sallypannn/pytorch-widedeep/tree/ab4a209a2a3bff539f543a66ac51306042ed6693 | import math
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
def _get_activation_fn(activation):
if activation == 'relu':
return nn.ReLU(inplace=True)
if activation == 'leaky_relu':
return nn.LeakyReLU(inplace=True)
elif activation == 'gelu':
return nn.GELU()
elif activation == 'geglu':
return GEGLU()
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
class Model(nn.Module):
    def __init__(self, n_cont_cols: 'int', embed_dim: 'int',
        activation: 'str'=None, bias: 'bool'=True):
super().__init__()
self.n_cont_cols = n_cont_cols
self.embed_dim = embed_dim
self.activation = activation
self.weight = nn.Parameter(torch.Tensor(n_cont_cols, embed_dim))
        self.bias = (nn.Parameter(torch.Tensor(n_cont_cols, embed_dim))
            if bias else None)
self._reset_parameters()
self.act_fn = _get_activation_fn(activation) if activation else None
def _reset_parameters(self) ->None:
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: 'Tensor') ->Tensor:
x = self.weight.unsqueeze(0) * X.unsqueeze(2)
if self.bias is not None:
x = x + self.bias.unsqueeze(0)
if self.act_fn is not None:
x = self.act_fn(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/el/cel3ti6ei3rprs2l5m6qs62p6md67qhlcbr3oxhxsqfmherljfbo.py
# Topologically Sorted Source Nodes: [H1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# H1 => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (256, 784), (784, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (10, 256), (256, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 256), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [H1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (buf2, primals_1, buf1, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional
class Net(nn.Module):
def __init__(self, num_inputs=784, num_outputs=10, num_hiddens=256,
is_training=True):
super(Net, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_hiddens = num_hiddens
self.linear_1 = nn.Linear(num_inputs, num_hiddens)
self.linear_2 = nn.Linear(num_hiddens, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
X = X.reshape((-1, self.num_inputs))
H1 = self.relu(self.linear_1(X))
out = self.linear_2(H1)
return out
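def _example_usage():
    # Hypothetical usage sketch (not part of the original repo): an MLP for
    # flattened 28x28 images (784 inputs -> 256 hidden units -> 10 logits);
    # forward() reshapes any input to (-1, 784) before the first layer.
    net = Net()
    X = torch.rand(4, 1, 28, 28)
    return net(X)  # shape: (4, 10)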
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (256, 784), (784, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (10, 256), (256, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
256), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1024)](buf1, primals_3, 1024, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(256, 10), (1, 256), 0), alpha=1, beta=1, out=buf2)
del primals_5
return buf2, primals_1, buf1, primals_4
class NetNew(nn.Module):
def __init__(self, num_inputs=784, num_outputs=10, num_hiddens=256,
is_training=True):
super(NetNew, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_hiddens = num_hiddens
self.linear_1 = nn.Linear(num_inputs, num_hiddens)
self.linear_2 = nn.Linear(num_hiddens, num_outputs)
self.relu = nn.ReLU()
def forward(self, input_0):
primals_2 = self.linear_1.weight
primals_3 = self.linear_1.bias
primals_4 = self.linear_2.weight
primals_5 = self.linear_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| samjz06/d2l-pytorch | Net | false | 7,593 | [
"Apache-2.0"
] | 1 | 80eca3f7d217eefb4f6ae08aae24c6a3c2714898 | https://github.com/samjz06/d2l-pytorch/tree/80eca3f7d217eefb4f6ae08aae24c6a3c2714898 | import torch
import torch.nn as nn
import torch.nn.functional
class Model(nn.Module):
def __init__(self, num_inputs=784, num_outputs=10, num_hiddens=256,
is_training=True):
super().__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_hiddens = num_hiddens
self.linear_1 = nn.Linear(num_inputs, num_hiddens)
self.linear_2 = nn.Linear(num_hiddens, num_outputs)
self.relu = nn.ReLU()
def forward(self, X):
X = X.reshape((-1, self.num_inputs))
H1 = self.relu(self.linear_1(X))
out = self.linear_2(H1)
return out
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return []
|
L2Softmax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/vc/cvc3nksoxathxuwuv5ctikhvrgby37ipqexls2twlnjpxwgtnvyk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 1), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp16 = 1.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nj/cnj3unyqgypcusuwkvrket2onfhnggwty5ybu3qvwwrdsjlzy5e3.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 4), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/js/cjss65qmcxnfhiofwf7byaqh2nn3lfi6gry3b6wa6wqt5qkfnnec.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
# Source node to ATen node mapping:
# cross_entropy => div_1, exp, log, mul_1, neg, sub_1, sum_2, sum_3
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %log), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg1_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_3,), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Scalar](args = (%neg, 64), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_2 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (r3), None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_2.run(buf3, buf1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del buf1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch.nn import functional as F
from torch.nn.modules.loss import _WeightedLoss
class L2Softmax(_WeightedLoss):
"""L2Softmax from
`"L2-constrained Softmax Loss for Discriminative Face Verification"
<https://arxiv.org/abs/1703.09507>`_ paper.
Parameters
----------
classes: int.
Number of classes.
alpha: float.
        The scaling parameter; a hypersphere with a small alpha
        limits the surface area available for embedding features.
p: float, default is 0.9.
The expected average softmax probability for correctly
classifying a feature.
from_normx: bool, default is False.
Whether input has already been normalized.
Outputs:
- **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, classes, alpha, p=0.9, from_normx=False, weight=None,
size_average=None, ignore_index=-100, reduce=None, reduction='mean'):
super(L2Softmax, self).__init__(weight, size_average, reduce, reduction
)
alpha_low = math.log(p * (classes - 2) / (1 - p))
        assert alpha > alpha_low, 'For given probability of p={}, alpha should be higher than {}.'.format(
            p, alpha_low)
self.ignore_index = ignore_index
self.alpha = alpha
self.from_normx = from_normx
def forward(self, x, target):
if not self.from_normx:
x = F.normalize(x, 2, dim=-1)
x = x * self.alpha
return F.cross_entropy(x, target, weight=self.weight, ignore_index=
self.ignore_index, reduction=self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'classes': 4, 'alpha': 4}]
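# Worked check of the alpha lower bound (added for illustration; not in the
# original repo code). With classes=4 and p=0.9 the constraint reads
# alpha > log(p * (classes - 2) / (1 - p)) = log(18) ≈ 2.89, so the alpha=4
# supplied by get_init_inputs() satisfies the assert in __init__.
def _example_alpha_bound(classes=4, p=0.9):
    alpha_low = math.log(p * (classes - 2) / (1 - p))
    loss_fn = L2Softmax(classes=classes, alpha=4)  # 4 > ~2.89, so this passes
    return alpha_low, loss_fn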
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch.nn.modules.loss import _WeightedLoss
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
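    # clamp the L2 norm at eps = 1e-12 before dividing, matching F.normalize(x, 2, dim=-1)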
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tmp16 = 1.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = 4.0
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_2(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr1 + r3, None)
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tmp15 = tmp13 * tmp14
tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = -tmp18
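    # 0.015625 = 1/64: the 'mean' reduction of cross_entropy averages over the 4*4*4 = 64 per-position losses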
tmp20 = 0.015625
tmp21 = tmp19 * tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp21, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = buf2
del buf2
triton_per_fused__log_softmax_div_mul_neg_sum_2[grid(1)](buf3, buf1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg1_1
del buf1
return buf3,
class L2SoftmaxNew(_WeightedLoss):
"""L2Softmax from
`"L2-constrained Softmax Loss for Discriminative Face Verification"
<https://arxiv.org/abs/1703.09507>`_ paper.
Parameters
----------
classes: int.
Number of classes.
alpha: float.
        The scaling parameter; a hypersphere with a small alpha
        limits the surface area available for embedding features.
p: float, default is 0.9.
The expected average softmax probability for correctly
classifying a feature.
from_normx: bool, default is False.
Whether input has already been normalized.
Outputs:
- **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, classes, alpha, p=0.9, from_normx=False, weight=None,
size_average=None, ignore_index=-100, reduce=None, reduction='mean'):
super(L2SoftmaxNew, self).__init__(weight, size_average, reduce,
reduction)
alpha_low = math.log(p * (classes - 2) / (1 - p))
        assert alpha > alpha_low, 'For given probability of p={}, alpha should be higher than {}.'.format(
            p, alpha_low)
self.ignore_index = ignore_index
self.alpha = alpha
self.from_normx = from_normx
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| sailfish009/torch-toolbox | L2Softmax | false | 7,594 | [
"BSD-3-Clause"
] | 1 | 80dfc22c697b9f323e097de72af04f0e5435d7b4 | https://github.com/sailfish009/torch-toolbox/tree/80dfc22c697b9f323e097de72af04f0e5435d7b4 | import math
import torch
from torch.nn import functional as F
from torch.nn.modules.loss import _WeightedLoss
class Model(_WeightedLoss):
"""L2Softmax from
`"L2-constrained Softmax Loss for Discriminative Face Verification"
<https://arxiv.org/abs/1703.09507>`_ paper.
Parameters
----------
classes: int.
Number of classes.
alpha: float.
        The scaling parameter; a hypersphere with a small alpha
        limits the surface area available for embedding features.
p: float, default is 0.9.
The expected average softmax probability for correctly
classifying a feature.
from_normx: bool, default is False.
Whether input has already been normalized.
Outputs:
- **loss**: loss tensor with shape (batch_size,). Dimensions other than
batch_axis are averaged out.
"""
def __init__(self, classes, alpha, p=0.9, from_normx=False, weight=None,
size_average=None, ignore_index=-100, reduce=None, reduction='mean'):
super().__init__(weight, size_average, reduce, reduction
)
alpha_low = math.log(p * (classes - 2) / (1 - p))
        assert alpha > alpha_low, 'For given probability of p={}, alpha should be higher than {}.'.format(
            p, alpha_low)
self.ignore_index = ignore_index
self.alpha = alpha
self.from_normx = from_normx
def forward(self, x, target):
if not self.from_normx:
x = F.normalize(x, 2, dim=-1)
x = x * self.alpha
return F.cross_entropy(x, target, weight=self.weight, ignore_index=
self.ignore_index, reduction=self.reduction)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Conv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wl/cwldpc2k6v7rbizd6tlddleva3alwxblabsherkqjtef5e45djwk.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4n/c4nv2qrk3qesfzhmmlvg6nswa4xauhqhvv4tkc4vcrmocpm2vugt.py
# Topologically Sorted Source Nodes: [output, output_1, output_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output => convolution
# output_1 => add, repeat, rsqrt, var_mean
# output_2 => relu
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + (25*x0)), rmask & xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 25, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 25.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + (25*x0)), tmp3, rmask & xmask)
tl.store(out_ptr3 + (r3 + (25*x0)), tmp31, rmask & xmask)
tl.store(out_ptr4 + (r3 + (25*x0)), tmp33, rmask & xmask)
tl.store(out_ptr5 + (x0), tmp25, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf3 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf2 = buf1; del buf1 # reuse
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [output, output_1, output_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1.run(buf2, primals_4, primals_3, primals_5, buf3, buf4, buf8, buf9, buf7, 16, 25, grid=grid(16), stream=stream0)
del primals_3
del primals_4
del primals_5
return (buf8, primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16, ), (1, ), 0), buf9, reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn.functional import interpolate
from typing import cast
class Interpolate(nn.Module):
def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest'
) ->None:
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return cast(torch.Tensor, interpolate(input, scale_factor=self.
scale_factor, mode=self.mode))
def extra_repr(self) ->str:
extras = [f'scale_factor={self.scale_factor}']
if self.mode != 'nearest':
extras.append(f'mode={self.mode}')
return ', '.join(extras)
class Conv(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', *, stride: int=1, upsample: bool=False, norm: bool=True,
activation: bool=True):
super().__init__()
self.upsample = Interpolate(scale_factor=stride) if upsample else None
self.pad = nn.ReflectionPad2d(kernel_size // 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=1 if upsample else stride)
self.norm = nn.InstanceNorm2d(out_channels, affine=True
) if norm else None
self.activation = nn.ReLU() if activation else None
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
if self.upsample:
input = self.upsample(input)
output = self.conv(self.pad(input))
if self.norm:
output = self.norm(output)
if self.activation:
output = self.activation(output)
return cast(torch.Tensor, output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
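# Hedged shape sketch (added for illustration; not part of the original repo
# code): ReflectionPad2d(kernel_size // 2) grows the 4x4 input to 8x8, and the
# 4x4 kernel at stride 1 then yields 8 - 4 + 1 = 5, matching the (4, 4, 5, 5)
# buffers asserted in the compiled call().
def _example_conv_shapes():
    conv = Conv(in_channels=4, out_channels=4, kernel_size=4)
    out = conv(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 5, 5)
    return out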
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
from torch.nn.functional import interpolate
from typing import cast
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
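    # the nested tl_math.abs() terms reflect the 8x8 padded coordinates back into the 4x4 source tensor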
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3,
out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 25
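    # xnumel = 16 instances (N*C = 4*4) and rnumel = 25 spatial positions (5*5): stats per instance, i.e. InstanceNorm2d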
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 25 * x0), rmask & xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 25, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(rmask & xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 25.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 25 * x0), tmp3, rmask & xmask)
tl.store(out_ptr3 + (r3 + 25 * x0), tmp31, rmask & xmask)
tl.store(out_ptr4 + (r3 + 25 * x0), tmp33, rmask & xmask)
tl.store(out_ptr5 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
buf2 = buf1
del buf1
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf9 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_threshold_backward_1[
grid(16)](buf2, primals_4, primals_3, primals_5, buf3, buf4,
buf8, buf9, buf7, 16, 25, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
del primals_4
del primals_5
return buf8, primals_2, buf0, buf2, buf3, reinterpret_tensor(buf7, (16,
), (1,), 0), buf9, reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1,
1, 1), 0)
class Interpolate(nn.Module):
def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest'
) ->None:
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return cast(torch.Tensor, interpolate(input, scale_factor=self.
scale_factor, mode=self.mode))
def extra_repr(self) ->str:
extras = [f'scale_factor={self.scale_factor}']
if self.mode != 'nearest':
extras.append(f'mode={self.mode}')
return ', '.join(extras)
class ConvNew(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', *, stride: int=1, upsample: bool=False, norm: bool=True,
activation: bool=True):
super().__init__()
self.upsample = Interpolate(scale_factor=stride) if upsample else None
self.pad = nn.ReflectionPad2d(kernel_size // 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=1 if upsample else stride)
self.norm = nn.InstanceNorm2d(out_channels, affine=True
) if norm else None
self.activation = nn.ReLU() if activation else None
def forward(self, input_0):
        # call() pads primals_1 (the input) and convolves it with primals_2
        # (the conv weight), so the assignments must follow that order.
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_4 = self.norm.weight
        primals_5 = self.norm.bias
        primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| sakshi-06/pystiche | Conv | false | 7,595 | [
"BSD-3-Clause"
] | 1 | 21a67364b332a34a2308a929f200900c76be5b73 | https://github.com/sakshi-06/pystiche/tree/21a67364b332a34a2308a929f200900c76be5b73 | import torch
from torch import nn
from torch.nn.functional import interpolate
from typing import cast
class Interpolate(nn.Module):
def __init__(self, scale_factor: 'float'=1.0, mode: 'str'='nearest'
) ->None:
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return cast(torch.Tensor, interpolate(input, scale_factor=self.
scale_factor, mode=self.mode))
def extra_repr(self) ->str:
extras = [f'scale_factor={self.scale_factor}']
if self.mode != 'nearest':
extras.append(f'mode={self.mode}')
return ', '.join(extras)
class Model(nn.Module):
def __init__(self, in_channels: 'int', out_channels: 'int', kernel_size:
'int', *, stride: int=1, upsample: bool=False, norm: bool=True,
activation: bool=True):
super().__init__()
self.upsample = Interpolate(scale_factor=stride) if upsample else None
self.pad = nn.ReflectionPad2d(kernel_size // 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=1 if upsample else stride)
self.norm = nn.InstanceNorm2d(out_channels, affine=True
) if norm else None
self.activation = nn.ReLU() if activation else None
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
if self.upsample:
input = self.upsample(input)
output = self.conv(self.pad(input))
if self.norm:
output = self.norm(output)
if self.activation:
output = self.activation(output)
return cast(torch.Tensor, output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Wide | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wi/cwibqvrnbfx7xhnfzzckhfwxbmmaeepyx4l2irzdxw23feqjr3lp.py
# Topologically Sorted Source Nodes: [long], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# long => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%primals_1, torch.int64), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.int64)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sj/csjoftoytknoektasbm3r3u6wambt2s3chynhpfivuojebg4btx7.py
# Topologically Sorted Source Nodes: [embedding, sum_1, out], Original ATen: [aten.embedding, aten.sum, aten.add]
# Source node to ATen node mapping:
# embedding => embedding
# out => add
# sum_1 => sum_1
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %convert_element_type, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%embedding, [1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %primals_3), kwargs = {})
triton_poi_fused_add_embedding_sum_1 = async_compile.triton('triton_poi_fused_add_embedding_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_embedding_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_embedding_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp21 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp28 = tl.load(in_ptr2 + (0))
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
tmp1 = tl.full([XBLOCK], 5, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 5)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 5")
tmp6 = tl.load(in_ptr1 + (tmp4), xmask, eviction_policy='evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert(((0 <= tmp10) & (tmp10 < 5)) | ~(xmask), "index out of bounds: 0 <= tmp10 < 5")
tmp12 = tl.load(in_ptr1 + (tmp10), xmask, eviction_policy='evict_last')
tmp13 = tmp6 + tmp12
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.device_assert(((0 <= tmp17) & (tmp17 < 5)) | ~(xmask), "index out of bounds: 0 <= tmp17 < 5")
tmp19 = tl.load(in_ptr1 + (tmp17), xmask, eviction_policy='evict_last')
tmp20 = tmp13 + tmp19
tmp22 = tmp21 + tmp1
tmp23 = tmp21 < 0
tmp24 = tl.where(tmp23, tmp22, tmp21)
tl.device_assert(((0 <= tmp24) & (tmp24 < 5)) | ~(xmask), "index out of bounds: 0 <= tmp24 < 5")
tmp26 = tl.load(in_ptr1 + (tmp24), xmask, eviction_policy='evict_last')
tmp27 = tmp20 + tmp26
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (5, 1), (1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [long], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding, sum_1, out], Original ATen: [aten.embedding, aten.sum, aten.add]
triton_poi_fused_add_embedding_sum_1.run(buf0, primals_2, primals_3, buf1, 64, grid=grid(64), stream=stream0)
del primals_2
del primals_3
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((5, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import Tensor
from torch import nn
class Wide(nn.Module):
"""wide (linear) component
Linear model implemented via an Embedding layer connected to the output
neuron(s).
Parameters
-----------
wide_dim: int
size of the Embedding layer. `wide_dim` is the summation of all the
individual values for all the features that go through the wide
component. For example, if the wide component receives 2 features with
5 individual values each, `wide_dim = 10`
pred_dim: int, default = 1
        size of the output tensor containing the predictions
Attributes
-----------
wide_linear: ``nn.Module``
the linear layer that comprises the wide branch of the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
>>> X = torch.empty(4, 4).random_(6)
>>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> out = wide(X)
"""
def __init__(self, wide_dim: 'int', pred_dim: 'int'=1):
super(Wide, self).__init__()
self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
self.bias = nn.Parameter(torch.zeros(pred_dim))
self._reset_parameters()
def _reset_parameters(self) ->None:
"""initialize Embedding and bias like nn.Linear. See `original
implementation
<https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear>`_.
"""
nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.
weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: 'Tensor') ->Tensor:
"""Forward pass. Simply connecting the Embedding layer with the ouput
neuron(s)"""
out = self.wide_linear(X.long()).sum(dim=1) + self.bias
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'wide_dim': 4}]
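# Hedged usage sketch (added for illustration; not part of the original repo
# code): Wide casts its input to long indices, so values must stay below
# wide_dim + 1 = 5 (the device_assert in the fused kernel checks this bound).
def _example_wide_usage():
    wide = Wide(wide_dim=4, pred_dim=1)
    X = torch.empty(4, 4).random_(5)  # integer-valued floats in [0, 5)
    out = wide(X)  # embedding lookup, sum over dim=1, plus bias
    assert out.shape == (4, 1)
    return out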
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int64)
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_add_embedding_sum_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp7 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp21 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp28 = tl.load(in_ptr2 + 0)
tmp29 = tl.broadcast_to(tmp28, [XBLOCK])
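    # 5 = wide_dim + 1 rows in the embedding table; negative indices are wrapped before the bounds assert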
tmp1 = tl.full([XBLOCK], 5, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 5) | ~xmask,
'index out of bounds: 0 <= tmp4 < 5')
tmp6 = tl.load(in_ptr1 + tmp4, xmask, eviction_policy='evict_last')
tmp8 = tmp7 + tmp1
tmp9 = tmp7 < 0
tmp10 = tl.where(tmp9, tmp8, tmp7)
tl.device_assert((0 <= tmp10) & (tmp10 < 5) | ~xmask,
'index out of bounds: 0 <= tmp10 < 5')
tmp12 = tl.load(in_ptr1 + tmp10, xmask, eviction_policy='evict_last')
tmp13 = tmp6 + tmp12
tmp15 = tmp14 + tmp1
tmp16 = tmp14 < 0
tmp17 = tl.where(tmp16, tmp15, tmp14)
tl.device_assert((0 <= tmp17) & (tmp17 < 5) | ~xmask,
'index out of bounds: 0 <= tmp17 < 5')
tmp19 = tl.load(in_ptr1 + tmp17, xmask, eviction_policy='evict_last')
tmp20 = tmp13 + tmp19
tmp22 = tmp21 + tmp1
tmp23 = tmp21 < 0
tmp24 = tl.where(tmp23, tmp22, tmp21)
tl.device_assert((0 <= tmp24) & (tmp24 < 5) | ~xmask,
'index out of bounds: 0 <= tmp24 < 5')
tmp26 = tl.load(in_ptr1 + tmp24, xmask, eviction_policy='evict_last')
tmp27 = tmp20 + tmp26
tmp30 = tmp27 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (5, 1), (1, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(256)](primals_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_add_embedding_sum_1[grid(64)](buf0, primals_2,
primals_3, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
del primals_3
return buf1, buf0
class WideNew(nn.Module):
"""wide (linear) component
Linear model implemented via an Embedding layer connected to the output
neuron(s).
Parameters
-----------
wide_dim: int
size of the Embedding layer. `wide_dim` is the summation of all the
individual values for all the features that go through the wide
component. For example, if the wide component receives 2 features with
5 individual values each, `wide_dim = 10`
pred_dim: int, default = 1
        size of the output tensor containing the predictions
Attributes
-----------
wide_linear: ``nn.Module``
the linear layer that comprises the wide branch of the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
>>> X = torch.empty(4, 4).random_(6)
>>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> out = wide(X)
"""
def __init__(self, wide_dim: 'int', pred_dim: 'int'=1):
super(WideNew, self).__init__()
self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
self.bias = nn.Parameter(torch.zeros(pred_dim))
self._reset_parameters()
def _reset_parameters(self) ->None:
"""initialize Embedding and bias like nn.Linear. See `original
implementation
<https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear>`_.
"""
nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.
weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input_0):
primals_3 = self.bias
primals_2 = self.wide_linear.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| sallypannn/pytorch-widedeep | Wide | false | 7,596 | [
"MIT"
] | 1 | ab4a209a2a3bff539f543a66ac51306042ed6693 | https://github.com/sallypannn/pytorch-widedeep/tree/ab4a209a2a3bff539f543a66ac51306042ed6693 | import math
import torch
from torch import Tensor
from torch import nn
class Model(nn.Module):
"""wide (linear) component
Linear model implemented via an Embedding layer connected to the output
neuron(s).
Parameters
-----------
wide_dim: int
size of the Embedding layer. `wide_dim` is the summation of all the
individual values for all the features that go through the wide
component. For example, if the wide component receives 2 features with
5 individual values each, `wide_dim = 10`
pred_dim: int, default = 1
        size of the output tensor containing the predictions
Attributes
-----------
wide_linear: ``nn.Module``
the linear layer that comprises the wide branch of the model
Examples
--------
>>> import torch
>>> from pytorch_widedeep.models import Wide
>>> X = torch.empty(4, 4).random_(6)
>>> wide = Wide(wide_dim=X.unique().size(0), pred_dim=1)
>>> out = wide(X)
"""
def __init__(self, wide_dim: 'int', pred_dim: 'int'=1):
super().__init__()
self.wide_linear = nn.Embedding(wide_dim + 1, pred_dim, padding_idx=0)
self.bias = nn.Parameter(torch.zeros(pred_dim))
self._reset_parameters()
def _reset_parameters(self) ->None:
"""initialize Embedding and bias like nn.Linear. See `original
implementation
<https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear>`_.
"""
nn.init.kaiming_uniform_(self.wide_linear.weight, a=math.sqrt(5))
fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.wide_linear.
weight)
bound = 1 / math.sqrt(fan_in)
nn.init.uniform_(self.bias, -bound, bound)
def forward(self, X: 'Tensor') ->Tensor:
"""Forward pass. Simply connecting the Embedding layer with the ouput
neuron(s)"""
out = self.wide_linear(X.long()).sum(dim=1) + self.bias
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
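# Hedged sketch (added for illustration; not part of the original repo): the
# embedding-lookup-plus-sum in Model.forward computes the same dot product a
# Linear layer over a multi-hot encoding would, since nn.Embedding(...)(idx)
# is just a row gather from the weight matrix.
def _check_wide_equivalence():
    model = Model(wide_dim=5)
    X = torch.empty(4, 4).random_(6)  # integer-coded categorical features
    out = model(X)  # (batch, pred_dim)
    # manual equivalent: gather the selected embedding rows, sum, add bias
    ref = model.wide_linear.weight[X.long()].sum(dim=1) + model.bias
    assert torch.allclose(out, ref)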
|
ConvMeanPool | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/cp/ccpqyiffqt7li3azr2r3sbi4ru3k2msmtcugjef3hzoik3amsif4.py
# Topologically Sorted Source Nodes: [add, add_1, add_2, add_3, output_1], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# output_1 => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, 0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_12), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %slice_16), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4.0), kwargs = {})
triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x4 = (xindex // 2)
x2 = (xindex // 4) % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x4)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x4)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x4)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp1
tmp7 = tmp4 + tmp6
tmp9 = tmp8 + tmp1
tmp10 = tmp7 + tmp9
tmp12 = tmp11 + tmp1
tmp13 = tmp10 + tmp12
tmp14 = 0.25
tmp15 = tmp13 * tmp14
tl.store(out_ptr0 + (x5), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, add_1, add_2, add_3, output_1], Original ATen: [aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_0.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0)
del buf0
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
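# (comment added for clarity) Fuses the conv bias add with the 2x2 mean pool:
# each output pixel averages the four strided-slice elements (x 0.25), with
# the per-channel bias added to each element before averaging.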
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x4 = xindex // 2
x2 = xindex // 4 % 4
x5 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x4), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x4), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x4), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x4), xmask, eviction_policy
='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp6 = tmp5 + tmp1
tmp7 = tmp4 + tmp6
tmp9 = tmp8 + tmp1
tmp10 = tmp7 + tmp9
tmp12 = tmp11 + tmp1
tmp13 = tmp10 + tmp12
tmp14 = 0.25
tmp15 = tmp13 * tmp14
tl.store(out_ptr0 + x5, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](buf0, primals_2, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3
class ConvMeanPoolNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| samsartor/score_sde | ConvMeanPool | false | 7,597 | [
"Apache-2.0"
] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
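# Hedged sketch (added for illustration; not part of the original repo): the
# four strided slices averaged in Model.forward are exactly a 2x2 mean pool,
# so the module should agree with the conv followed by F.avg_pool2d.
def _check_conv_mean_pool():
    import torch.nn.functional as F
    model = Model(input_dim=4, output_dim=4)
    x = torch.rand(4, 4, 4, 4)
    ref = F.avg_pool2d(model.conv(x), kernel_size=2, stride=2)
    assert torch.allclose(model(x), ref, atol=1e-6)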
|
UpsampleConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hg/chgxzii65iyfdwuwk6btc2ievya3atcd4si6chvog45v2d5sekhj.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.pixel_shuffle]
# Source node to ATen node mapping:
# output_1 => clone_2
# Graph fragment:
# %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_pixel_shuffle_0 = async_compile.triton('triton_poi_fused_pixel_shuffle_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pixel_shuffle_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pixel_shuffle_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2) % 4
x2 = (xindex // 8) % 2
x3 = (xindex // 16) % 4
x5 = (xindex // 256)
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*x3) + (16*x0) + (32*x2) + (64*x5)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x6), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mt/cmt4roffhwfg6vw2odjfrgu4bjav3cztqx74kxjfq5igljucibfl.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_2, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.pixel_shuffle]
stream0 = get_raw_stream(0)
triton_poi_fused_pixel_shuffle_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 1024, grid=grid(1024), stream=stream0)
del primals_3
return (buf2, primals_2, reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64, 8, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class UpsampleConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
def forward(self, inputs):
output = inputs
output = torch.cat([output, output, output, output], dim=1)
output = self.pixelshuffle(output)
return self.conv(output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
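# (comment added for clarity) Materializes cat([x, x, x, x], dim=1) followed
# by PixelShuffle(2) as a single gather into a (4, 4, 4, 2, 4, 2) buffer that
# is later reinterpreted as the (4, 4, 8, 8) upsampled map.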
@triton.jit
def triton_poi_fused_pixel_shuffle_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 4
x2 = xindex // 8 % 2
x3 = xindex // 16 % 4
x5 = xindex // 256
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x3 + 16 * x0 + 32 * x2 + 64 * x5),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x6, tmp0, xmask)
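# (comment added for clarity) In-place per-channel bias add after the
# external convolution call, which was launched with bias=None.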
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2, 4, 2), (256, 64, 16, 8, 2, 1
), torch.float32)
get_raw_stream(0)
triton_poi_fused_pixel_shuffle_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (4, 4, 8,
8), (256, 64, 8, 1), 0), primals_2, stride=(1, 1), padding=(1,
1), dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 8, 8), (256, 64, 8, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(1024)](buf2, primals_3, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, reinterpret_tensor(buf0, (4, 4, 8, 8), (256, 64,
8, 1), 0)
class UpsampleConvNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| samsartor/score_sde | UpsampleConv | false | 7,598 | [
"Apache-2.0"
] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.pixelshuffle = nn.PixelShuffle(upscale_factor=2)
def forward(self, inputs):
output = inputs
output = torch.cat([output, output, output, output], dim=1)
output = self.pixelshuffle(output)
return self.conv(output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
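# Hedged sketch (added for illustration; not part of the original repo): for
# a single-channel input, the cat-then-PixelShuffle(2) trick reduces to 2x
# nearest-neighbor upsampling, because the four shuffled sub-channels are
# identical copies; the module then applies a stride-1 conv to the result.
def _check_upsample_conv():
    import torch.nn.functional as F
    x = torch.rand(2, 1, 4, 4)
    up = nn.PixelShuffle(2)(torch.cat([x, x, x, x], dim=1))
    assert torch.allclose(up, F.interpolate(x, scale_factor=2, mode='nearest'))
    model = Model(input_dim=1, output_dim=1)
    assert model(x).shape == (2, 1, 8, 8)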
|
MeanPoolConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/pv/cpvgbbhz2ogv4ozvsqtvyvdk455nodd3e7fka6x22igbrbeqmu6h.py
# Topologically Sorted Source Nodes: [add, add_1, add_2, add_3, output], Original ATen: [aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# output => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_4, 0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %slice_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %slice_12), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %slice_16), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_3, 4.0), kwargs = {})
triton_poi_fused_add_div_0 = async_compile.triton('triton_poi_fused_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%div, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, add_1, add_2, add_3, output], Original ATen: [aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MeanPoolConv(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
def forward(self, inputs):
output = inputs
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return self.conv(output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
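# (comment added for clarity) 2x2 mean pool of the input: the four
# strided-slice elements per output pixel are summed (the leading 0.0 is
# sum()'s start value) and scaled by 0.25.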
@triton.jit
def triton_poi_fused_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
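# (comment added for clarity) In-place per-channel bias add after the
# external convolution call, which was launched with bias=None.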
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 2, 2), (16, 4, 2, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class MeanPoolConvNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| samsartor/score_sde | MeanPoolConv | false | 7,599 | [
"Apache-2.0"
] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True):
super().__init__()
self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
def forward(self, inputs):
output = inputs
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return self.conv(output)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
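# Hedged sketch (added for illustration; not part of the original repo): the
# mirror of ConvMeanPool -- here the 2x2 mean pool runs before the conv, so
# the module should agree with F.avg_pool2d followed by the conv.
def _check_mean_pool_conv():
    import torch.nn.functional as F
    model = Model(input_dim=4, output_dim=4)
    x = torch.rand(4, 4, 4, 4)
    ref = model.conv(F.avg_pool2d(x, kernel_size=2, stride=2))
    assert torch.allclose(model(x), ref, atol=1e-6)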
|
MultiHeadAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/rh/crhy6nilvaajphuuoyup37xl4ncuiyrcb3fnt5aboux6wyvcg7ie.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xv/cxv4nnjj2oa6lyulcwkszt5zgwvi477oas5l6ruowipgx4fl7lqm.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention => div_1, exp, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_1 = async_compile.triton('triton_per_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + (16*x0)), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mz/cmzlu2lip25blpsdqeby7ek5757op6xw3pdkxbdediou5szw32tx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax]
triton_per_fused__softmax_1.run(buf5, buf8, 256, 16, grid=grid(256), stream=stream0)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf9, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16, 1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0), out=buf10)
buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf10, buf11, 64, 4, grid=grid(64, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0), buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, d_model, n_heads, dropout):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.scale = math.sqrt(d_model)
def forward(self, query, key, value, mask=None):
batch_size = query.shape[0]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K_t = K.permute(0, 1, 3, 2)
energy = torch.matmul(Q, K_t) / self.scale
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e+18)
attention = torch.softmax(energy, dim=-1)
x = torch.matmul(self.dropout(attention), V)
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(batch_size, -1, self.d_model)
x = self.fc_o(x)
return x, attention
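# Note (added): shape walkthrough for the default sizes below (d_model=4,
# n_heads=4, head_dim=1): each (4, 4, 4, 4) input is treated as batch 4 with
# 16 tokens; Q/K/V become (4, 4, 16, 1) after view/permute, energy and
# attention are (4, 4, 16, 16), and x is reassembled to (4, 16, 4) for fc_o.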
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_heads': 4, 'dropout': 0.5}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused__softmax_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tl_math.exp(tmp9)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.where(xmask, tmp11, 0)
tmp14 = tl.sum(tmp13, 1)[:, None]
tmp15 = tmp10 / tmp14
tl.store(out_ptr2 + (r1 + 16 * x0), tmp15, xmask)
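# Note (added): the 0.5 factor above is the fused 1/sqrt(d_model) attention
# scaling (d_model=4, so 1/scale = 0.5). Applying it after the max subtraction
# still computes softmax(energy / scale), since exp((x - max)/2) / sum(...) ==
# softmax(x/2) for any shift; dropout does not appear in the kernel,
# consistent with an eval-mode trace.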
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 16, 1), (64, 16, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf0, primals_3, buf3, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 16), (64, 16, 16, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 16)](buf1, primals_5, buf4, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 16, 16), (256, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 16, 1), (16, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 16), (16, 0, 1), 0), out=buf5)
buf8 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
triton_per_fused__softmax_1[grid(256)](buf5, buf8, 256, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del buf5
buf9 = reinterpret_tensor(buf1, (4, 4, 16, 1), (64, 16, 1, 1), 0)
del buf1
triton_poi_fused_clone_0[grid(16, 16)](buf2, primals_8, buf9, 16,
16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del primals_8
buf10 = reinterpret_tensor(buf2, (16, 16, 1), (16, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf8, (16, 16, 16), (256, 16,
1), 0), reinterpret_tensor(buf9, (16, 16, 1), (16, 1, 0), 0),
out=buf10)
buf11 = empty_strided_cuda((4, 16, 4, 1), (64, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(64, 4)](buf10, buf11, 64, 4, XBLOCK=4,
YBLOCK=32, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf10, (64, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_11, reinterpret_tensor(buf11, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_11
return reinterpret_tensor(buf12, (4, 16, 4), (64, 4, 1), 0
), buf8, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf8, reinterpret_tensor(buf11, (64, 4), (4, 1), 0
), primals_10, reinterpret_tensor(buf9, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf3, (16, 1, 16), (16, 1, 1), 0
), reinterpret_tensor(buf4, (16, 16, 1), (16, 1, 16), 0)
class MultiHeadAttentionLayerNew(nn.Module):
def __init__(self, d_model, n_heads, dropout):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.scale = math.sqrt(d_model)
def forward(self, input_0, input_1, input_2):
primals_2 = self.fc_q.weight
primals_3 = self.fc_q.bias
primals_4 = self.fc_k.weight
primals_5 = self.fc_k.bias
primals_7 = self.fc_v.weight
primals_8 = self.fc_v.bias
primals_10 = self.fc_o.weight
primals_11 = self.fc_o.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
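# Note (added): call() returns output[0] (x, reshaped to (4, 16, 4)) and
# output[1] (the attention map, buf8); the remaining entries of its tuple are
# tensors saved for the backward pass and are not surfaced here.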
| salvacarrion/nmt-continual-learning | MultiHeadAttentionLayer | false | 7,600 | [
"MIT"
] | 1 | 302147ac9c270f3341a68a72c803c457f05ff37b | https://github.com/salvacarrion/nmt-continual-learning/tree/302147ac9c270f3341a68a72c803c457f05ff37b | import math
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, d_model, n_heads, dropout):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.scale = math.sqrt(d_model)
def forward(self, query, key, value, mask=None):
batch_size = query.shape[0]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K_t = K.permute(0, 1, 3, 2)
energy = torch.matmul(Q, K_t) / self.scale
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e+18)
attention = torch.softmax(energy, dim=-1)
x = torch.matmul(self.dropout(attention), V)
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(batch_size, -1, self.d_model)
x = self.fc_o(x)
return x, attention
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 0.5]
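# Hedged usage sketch (not part of the original source): run the eager module
# on CPU and check the shapes derived above.
if __name__ == "__main__":
    m = Model(*get_init_inputs()).eval()  # eval() makes dropout a no-op
    x, attn = m(*get_inputs())
    assert x.shape == (4, 16, 4) and attn.shape == (4, 4, 16, 16)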
|
VarianceNorm2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/xn/cxnrqltwvo6dabbyqkzkxc7ass6ujrtpjhjlltlyg2hxd5tvdjoa.py
# Topologically Sorted Source Nodes: [vars_1, add, sqrt, h, out], Original ATen: [aten.var, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# h => div
# out => mul
# sqrt => sqrt
# vars_1 => var
# Graph fragment:
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [2, 3]), kwargs = {correction: 1, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %div), kwargs = {})
triton_per_fused_add_div_mul_sqrt_var_0 = async_compile.triton('triton_per_fused_add_div_mul_sqrt_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_sqrt_var_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_sqrt_var_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 15.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp23 = tmp0 / tmp21
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (r1 + (16*x0)), tmp24, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [vars_1, add, sqrt, h, out], Original ATen: [aten.var, aten.add, aten.sqrt, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_sqrt_var_0.run(buf3, primals_1, primals_2, buf4, 16, 16, grid=grid(16), stream=stream0)
del primals_2
return (buf4, primals_1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class VarianceNorm2d(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, x):
vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(vars + 1e-05)
out = self.alpha.view(-1, self.num_features, 1, 1) * h
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_div_mul_sqrt_var_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp22 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 15.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp23 = tmp0 / tmp21
tmp24 = tmp22 * tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr0 + (r1 + 16 * x0), tmp24, xmask)
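# Note (added): this persistent reduction computes the per-(N, C) spatial
# variance over 16 elements with Bessel's correction (sum of squares / 15.0),
# matching torch.var's default unbiased estimator, then divides by
# sqrt(var + 1e-05) and scales by alpha in the same pass.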
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf3 = reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_mul_sqrt_var_0[grid(16)](buf3, primals_1,
primals_2, buf4, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_2
return buf4, primals_1, buf3
class VarianceNorm2dNew(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, input_0):
primals_2 = self.alpha
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| samsartor/score_sde | VarianceNorm2d | false | 7,601 | [
"Apache-2.0"
] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_features, bias=False):
super().__init__()
self.num_features = num_features
self.bias = bias
self.alpha = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
def forward(self, x):
vars = torch.var(x, dim=(2, 3), keepdim=True)
h = x / torch.sqrt(vars + 1e-05)
out = self.alpha.view(-1, self.num_features, 1, 1) * h
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
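# Hedged usage sketch (not part of the original source): the eager module also
# runs on CPU; the output shape matches the input.
if __name__ == "__main__":
    m = Model(*get_init_inputs())
    out = m(*get_inputs())
    assert out.shape == (4, 4, 4, 4)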
|
INDeConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/36/c36hdwspcdvpy5wt3j2jut3oepofxzxr2vli5wzw65y4jh7qfuf6.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => add, repeat, rsqrt, var_mean
# x_2 => relu
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_8, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 49
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (49*x0)), rmask & xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask & xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 49, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(rmask & xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 49.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(out_ptr3 + (r1 + (49*x0)), tmp29, rmask & xmask)
tl.store(out_ptr4 + (r1 + (49*x0)), tmp31, rmask & xmask)
tl.store(out_ptr5 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
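        # 7x7 output: for a stride-1, pad-0, dilation-1 transposed conv,
        # H_out = (H_in - 1) + kernel_size = 3 + 4 = 7.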
buf1 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0.run(primals_3, buf0, primals_4, buf1, buf2, buf6, buf7, buf5, 16, 49, grid=grid(16), stream=stream0)
del primals_3
del primals_4
return (buf6, primals_1, primals_2, buf0, buf1, reinterpret_tensor(buf5, (16, ), (1, ), 0), buf7, reinterpret_tensor(buf2, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class INDeConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, out_padding=0, dilation=1, groups=1, relu=True, ins_n=
True, bias=False):
super(INDeConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, output_padding=
out_padding, dilation=dilation, groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'out_planes': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0(
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, out_ptr4,
out_ptr5, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 49
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
x0 = xindex
r1 = rindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 49 * x0), rmask & xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask & xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 49, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(rmask & xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 49.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(out_ptr3 + (r1 + 49 * x0), tmp29, rmask & xmask)
tl.store(out_ptr4 + (r1 + 49 * x0), tmp31, rmask & xmask)
tl.store(out_ptr5 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
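# Note (added): this kernel fuses affine InstanceNorm2d (biased variance over
# the 49 spatial positions of the 7x7 deconv output, eps=1e-05) with ReLU, and
# also writes the `<= 0` mask (out_ptr4, i.e. buf7) consumed by the ReLU
# backward pass.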
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 7, 7), (196, 49, 7, 1))
buf1 = empty_strided_cuda((16,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf6 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.bool)
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_relu_repeat_threshold_backward_0[
grid(16)](primals_3, buf0, primals_4, buf1, buf2, buf6, buf7,
buf5, 16, 49, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
del primals_4
return buf6, primals_1, primals_2, buf0, buf1, reinterpret_tensor(buf5,
(16,), (1,), 0), buf7, reinterpret_tensor(buf2, (1, 16, 1, 1), (16,
1, 1, 1), 0)
class INDeConvNew(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, out_padding=0, dilation=1, groups=1, relu=True, ins_n=
True, bias=False):
super(INDeConvNew, self).__init__()
self.out_channels = out_planes
self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, output_padding=
out_padding, dilation=dilation, groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.ins_n.weight
primals_4 = self.ins_n.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| samsgood0310/Unsupervised-Defect-Segmentation | INDeConv | false | 7,602 | [
"Apache-2.0"
] | 1 | 66af32506cd6e60c356890616e28d679622fd8e6 | https://github.com/samsgood0310/Unsupervised-Defect-Segmentation/tree/66af32506cd6e60c356890616e28d679622fd8e6 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, out_padding=0, dilation=1, groups=1, relu=True, ins_n=
True, bias=False):
super().__init__()
self.out_channels = out_planes
self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, output_padding=
out_padding, dilation=dilation, groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
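# Hedged usage sketch (not part of the original source): a 4x4 input through a
# stride-1 4x4 transposed conv yields a 7x7 map.
if __name__ == "__main__":
    m = Model(*get_init_inputs())
    out = m(*get_inputs())
    assert out.shape == (4, 4, 7, 7)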
|
PlainRefiner | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/7b/c7bwvkzrfyqe7on7r6rupptsqxo3x6vxvpuiow36csr3chlibccz.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/st/csta3znaz5s4as2tgqjkidnl23bqeegkhtqw4xrobqdt7oqbloam.py
# Topologically Sorted Source Nodes: [raw_refine, add, pred_refine], Original ATen: [aten.convolution, aten.add, aten.sigmoid]
# Source node to ATen node mapping:
# add => add
# pred_refine => sigmoid
# raw_refine => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_10, %convolution_3), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
triton_poi_fused_add_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_add_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = tl.sigmoid(tmp5)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
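# Note (added): in the kernel above, in_ptr1 (the single-channel refinement) is
# indexed as x0 + 16 * x2, i.e. by spatial position and batch only, so the
# (4, 1, 4, 4) raw_refine is broadcast across the 4 channels of raw_alpha
# before the sigmoid.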
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [raw_refine], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [raw_refine, add, pred_refine], Original ATen: [aten.convolution, aten.add, aten.sigmoid]
triton_poi_fused_add_convolution_sigmoid_1.run(primals_10, buf6, primals_9, buf7, 256, grid=grid(256), stream=stream0)
del buf6
del primals_10
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PlainRefiner(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
            convolutional layers.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super(PlainRefiner, self).__init__()
self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3,
padding=1)
self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1
)
self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        from mmcv.cnn import xavier_init  # provided by mmcv, a dependency of the source repo
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m)
def forward(self, x, raw_alpha):
"""Forward function.
Args:
x (Tensor): The input feature map of refiner.
raw_alpha (Tensor): The raw predicted alpha matte.
Returns:
Tensor: The refined alpha matte.
"""
out = self.relu(self.refine_conv1(x))
out = self.relu(self.refine_conv2(out))
out = self.relu(self.refine_conv3(out))
raw_refine = self.refine_pred(out)
pred_refine = torch.sigmoid(raw_alpha + raw_refine)
return pred_refine
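# Design note (added): the refiner is residual -- it predicts a correction that
# is added to the raw alpha logits before the sigmoid, so the convolutions only
# have to learn a refinement on top of the coarse matte prediction.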
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = tl.sigmoid(tmp5)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_2,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(4096)](buf3, primals_5,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(4096)](buf5, primals_7,
4096, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_sigmoid_1[grid(256)](primals_10,
buf6, primals_9, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del primals_10
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5, buf7)
class PlainRefinerNew(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
            convolutional layers.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super(PlainRefinerNew, self).__init__()
self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3,
padding=1)
self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1
)
self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        from mmcv.cnn import xavier_init  # provided by mmcv, a dependency of the source repo
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m)
def forward(self, input_0, input_1):
primals_1 = self.refine_conv1.weight
primals_2 = self.refine_conv1.bias
primals_4 = self.refine_conv2.weight
primals_5 = self.refine_conv2.bias
primals_6 = self.refine_conv3.weight
primals_7 = self.refine_conv3.bias
primals_8 = self.refine_pred.weight
primals_9 = self.refine_pred.bias
primals_3 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| rivergold/mmediting | PlainRefiner | false | 7,603 | [
"Apache-2.0"
] | 1 | fd972635c48bb065db29d1b5090592a87c7263d2 | https://github.com/rivergold/mmediting/tree/fd972635c48bb065db29d1b5090592a87c7263d2 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
            convolutional layers.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super().__init__()
self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3,
padding=1)
self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1
)
self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        from mmcv.cnn import xavier_init  # provided by mmcv, a dependency of the source repo
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m)
def forward(self, x, raw_alpha):
"""Forward function.
Args:
x (Tensor): The input feature map of refiner.
raw_alpha (Tensor): The raw predicted alpha matte.
Returns:
Tensor: The refined alpha matte.
"""
out = self.relu(self.refine_conv1(x))
out = self.relu(self.refine_conv2(out))
out = self.relu(self.refine_conv3(out))
raw_refine = self.refine_pred(out)
pred_refine = torch.sigmoid(raw_alpha + raw_refine)
return pred_refine
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
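# Hedged usage sketch (not part of the original source): both test inputs are
# (4, 4, 4, 4), so the 1-channel raw_refine broadcasts over raw_alpha.
if __name__ == "__main__":
    m = Model(*get_init_inputs())
    pred = m(*get_inputs())
    assert pred.shape == (4, 4, 4, 4)
    assert 0.0 <= float(pred.min()) and float(pred.max()) <= 1.0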
|
InstanceNorm2dPlus | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/na/cnayvtp4eqkhz442k3cjownix7hhd24svsd5geu5lll5pciu5wir.py
# Topologically Sorted Source Nodes: [means, h], Original ATen: [aten.mean, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# h => add_1, rsqrt, var_mean
# means => mean
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_per_fused__native_batch_norm_legit_mean_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 5, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_mean_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp23, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr1 + (x0), tmp12, xmask)
''', device_str='cuda')
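# Reading of the fused kernel above (a hedged sketch, not the generated code):
# with primals_1 of shape (4, 4, 4, 4) viewed as (16, 16), each of the 16
# (batch, channel) rows produces three scalars in one pass:
#   out_ptr0    <- row.sum()                                   # spatial sum, feeds `means`
#   out_ptr1    <- row.mean()                                  # per-instance mean
#   in_out_ptr0 <- (row.var(unbiased=False) + 1e-05).rsqrt()   # instance-norm rstd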
# kernel path: runs/run_shard_4/inductor_cache/rt/crti7wnji3fr725frh4vjayqrxmykwqwd2lug7drcmcjd2arhxox.py
# Topologically Sorted Source Nodes: [means, m, v, sub, add, sqrt, means_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# m => mean_1
# means => mean
# means_1 => div
# sqrt => sqrt
# sub => sub
# v => var
# Graph fragment:
# %mean : [num_users=3] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [2, 3]), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%mean, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%mean, [-1]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mean_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_poi_fused_add_div_mean_sqrt_sub_var_1 = async_compile.triton('triton_poi_fused_add_div_mean_sqrt_sub_var_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_sqrt_sub_var_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_sqrt_sub_var_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp6 = tmp5 / tmp1
tmp7 = tmp4 + tmp6
tmp9 = tmp8 / tmp1
tmp10 = tmp7 + tmp9
tmp12 = tmp11 / tmp1
tmp13 = tmp10 + tmp12
tmp14 = 4.0
tmp15 = tmp13 / tmp14
tmp16 = tmp2 - tmp15
tmp17 = tmp4 - tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp6 - tmp15
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp15
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp12 - tmp15
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tmp30 = 1e-05
tmp31 = tmp29 + tmp30
tmp32 = libdevice.sqrt(tmp31)
tmp33 = tmp16 / tmp32
tl.store(out_ptr0 + (x2), tmp33, xmask)
''', device_str='cuda')
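# Reading of the kernel above (hedged): it finishes the `means` branch of the
# module entirely in registers, roughly
#   means = sums / 16.0                                    # (4, 4) channel means
#   out   = (means - means.mean(-1, keepdim=True)) / torch.sqrt(
#               means.var(-1, keepdim=True) + 1e-05)       # unbiased var (/ 3.0)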
# kernel path: runs/run_shard_4/inductor_cache/f2/cf2ifs7mrq64vpbthqedwiqpt32chlm6npncvlwvjpxxda2t2low.py
# Topologically Sorted Source Nodes: [mul, h_1, mul_1, out], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# h_1 => add_2
# mul => mul_1
# mul_1 => mul_2
# out => add_3
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %unsqueeze_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %add_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %view_3), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x3 = xindex
x4 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x4), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp0 * tmp9
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
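# Reading of the kernel above (hedged; pointer roles follow the run() call in
# call() below):
#   h   = (x - mean) * rstd                         # instance-normalised input
#   out = gamma * (h + means_1 * alpha) + beta      # broadcast over the 4x4 grid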
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [means, h], Original ATen: [aten.mean, aten._native_batch_norm_legit]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_mean_0.run(buf5, primals_1, buf0, buf2, 16, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [means, m, v, sub, add, sqrt, means_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.sqrt, aten.div]
triton_poi_fused_add_div_mean_sqrt_sub_var_1.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, h_1, mul_1, out], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_2.run(primals_3, primals_1, buf2, buf5, buf1, primals_2, primals_4, buf6, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf6, primals_1, primals_2, primals_3, buf2, buf5, reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class InstanceNorm2dPlus(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False,
track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / torch.sqrt(v + 1e-05)
h = self.instance_norm(x)
if self.bias:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1
) * h + self.beta.view(-1, self.num_features, 1, 1)
else:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h
return out
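# A minimal usage sketch (shapes are illustrative; output keeps the input shape):
# norm = InstanceNorm2dPlus(num_features=4)
# y = norm(torch.rand(4, 4, 8, 8))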
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_mean_0(in_out_ptr0, in_ptr0,
out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp1 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp23, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr1 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_sqrt_sub_var_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp1 = 16.0
tmp2 = tmp0 / tmp1
tmp4 = tmp3 / tmp1
tmp6 = tmp5 / tmp1
tmp7 = tmp4 + tmp6
tmp9 = tmp8 / tmp1
tmp10 = tmp7 + tmp9
tmp12 = tmp11 / tmp1
tmp13 = tmp10 + tmp12
tmp14 = 4.0
tmp15 = tmp13 / tmp14
tmp16 = tmp2 - tmp15
tmp17 = tmp4 - tmp15
tmp18 = tmp17 * tmp17
tmp19 = tmp6 - tmp15
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp15
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp12 - tmp15
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tmp30 = 1e-05
tmp31 = tmp29 + tmp30
tmp32 = libdevice.sqrt(tmp31)
tmp33 = tmp16 / tmp32
tl.store(out_ptr0 + x2, tmp33, xmask)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x4 = xindex // 16
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x4, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x4, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = tmp3 * tmp4
tmp8 = tmp6 * tmp7
tmp9 = tmp5 + tmp8
tmp10 = tmp0 * tmp9
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf2 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf5 = reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf3
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_mean_0[grid(16)](buf5,
primals_1, buf0, buf2, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_mean_sqrt_sub_var_1[grid(16)](buf0, buf1,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(256)](primals_3, primals_1, buf2,
buf5, buf1, primals_2, primals_4, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_4
return (buf6, primals_1, primals_2, primals_3, buf2, buf5,
reinterpret_tensor(buf1, (4, 4, 1, 1), (4, 1, 1, 1), 0))
class InstanceNorm2dPlusNew(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False,
track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, input_0):
primals_2 = self.alpha
primals_3 = self.gamma
primals_4 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| samsartor/score_sde | InstanceNorm2dPlus | false | 7,604 | [
"Apache-2.0"
] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_features, bias=True):
super().__init__()
self.num_features = num_features
self.bias = bias
self.instance_norm = nn.InstanceNorm2d(num_features, affine=False,
track_running_stats=False)
self.alpha = nn.Parameter(torch.zeros(num_features))
self.gamma = nn.Parameter(torch.zeros(num_features))
self.alpha.data.normal_(1, 0.02)
self.gamma.data.normal_(1, 0.02)
if bias:
self.beta = nn.Parameter(torch.zeros(num_features))
def forward(self, x):
means = torch.mean(x, dim=(2, 3))
m = torch.mean(means, dim=-1, keepdim=True)
v = torch.var(means, dim=-1, keepdim=True)
means = (means - m) / torch.sqrt(v + 1e-05)
h = self.instance_norm(x)
if self.bias:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1
) * h + self.beta.view(-1, self.num_features, 1, 1)
else:
h = h + means[..., None, None] * self.alpha[..., None, None]
out = self.gamma.view(-1, self.num_features, 1, 1) * h
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
BiaffineAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4khg7fwbxxm2fufox7nnkf4gfybrmj5ir2tx3zuxfioc5b2dya.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %primals_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
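# Reading of the kernel above (hedged): it materialises torch.cat((a, b), dim=-1)
# for two 4-feature inputs -- output lanes 0..3 copy from in_ptr0, lanes 4..7
# from in_ptr1.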
# kernel path: runs/run_shard_4/inductor_cache/w2/cw2bwqpq3dkexeyqz25khcvdcedkdcrcwpb7zrtd6eayijd5lgez.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %view_4), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
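# Reading of the kernel above (hedged): it fuses the final sum in place,
#   in_out_ptr0 <- trilinear_out + (mm_out + linear_bias)
# i.e. bilinear(x_1, x_2) + linear(cat(x_1, x_2)) from the module's forward.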
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_1
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_3, primals_2, buf2, 512, grid=grid(512), stream=stream0)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf4, buf3, primals_5, 256, grid=grid(256), stream=stream0)
del buf3
del primals_5
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (64, 8), (8, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.checkpoint
import torch.utils.data
class BiaffineAttention(torch.nn.Module):
"""Implements a biaffine attention operator for binary relation classification.
PyTorch implementation of the biaffine attention operator from "End-to-end neural relation
extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be used
as a classifier for binary relation classification.
Args:
in_features (int): The size of the feature dimension of the inputs.
out_features (int): The size of the feature dimension of the output.
Shape:
- x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of
      additional dimensions.
- x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of
additional dimensions.
- Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number
of additional dimensions.
Examples:
>>> batch_size, in_features, out_features = 32, 100, 4
>>> biaffine_attention = BiaffineAttention(in_features, out_features)
>>> x_1 = torch.randn(batch_size, in_features)
>>> x_2 = torch.randn(batch_size, in_features)
>>> output = biaffine_attention(x_1, x_2)
>>> print(output.size())
torch.Size([32, 4])
"""
def __init__(self, in_features, out_features):
super(BiaffineAttention, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features,
out_features, bias=False)
self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
self.reset_parameters()
def forward(self, x_1, x_2):
return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2),
dim=-1))
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
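# The forward pass decomposes as below (a hedged sketch; with bias=False,
# nn.Bilinear is exactly this einsum over its (out, in1, in2) weight):
# att = BiaffineAttention(4, 4)
# x_1, x_2 = torch.rand(2, 4), torch.rand(2, 4)
# manual = torch.einsum('bi,oij,bj->bo', x_1, att.bilinear.weight, x_2) \
#     + att.linear(torch.cat((x_1, x_2), dim=-1))
# assert torch.allclose(att(x_1, x_2), manual, atol=1e-06)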
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.checkpoint
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 8), (8, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor(
primals_3, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor(
primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_1
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_3, primals_2, buf2, 512,
XBLOCK=256, num_warps=4, num_stages=1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0),
reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf3)
del primals_4
buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (64, 8), (8, 1), 0)
class BiaffineAttentionNew(torch.nn.Module):
"""Implements a biaffine attention operator for binary relation classification.
PyTorch implementation of the biaffine attention operator from "End-to-end neural relation
extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be used
as a classifier for binary relation classification.
Args:
in_features (int): The size of the feature dimension of the inputs.
out_features (int): The size of the feature dimension of the output.
Shape:
- x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of
      additional dimensions.
- x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of
additional dimensions.
- Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number
of additional dimensions.
Examples:
>>> batch_size, in_features, out_features = 32, 100, 4
>>> biaffine_attention = BiaffineAttention(in_features, out_features)
>>> x_1 = torch.randn(batch_size, in_features)
>>> x_2 = torch.randn(batch_size, in_features)
>>> output = biaffine_attention(x_1, x_2)
>>> print(output.size())
torch.Size([32, 4])
"""
def __init__(self, in_features, out_features):
super(BiaffineAttentionNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features,
out_features, bias=False)
self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
self.reset_parameters()
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
def forward(self, input_0, input_1):
primals_1 = self.bilinear.weight
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| rushabh-v/unilm | BiaffineAttention | false | 7,605 | [
"MIT"
] | 1 | a62a023bd5d3500c23ac454be0a8b0107e18a6ce | https://github.com/rushabh-v/unilm/tree/a62a023bd5d3500c23ac454be0a8b0107e18a6ce | import torch
import torch.utils.checkpoint
import torch.utils.data
class Model(torch.nn.Module):
"""Implements a biaffine attention operator for binary relation classification.
PyTorch implementation of the biaffine attention operator from "End-to-end neural relation
extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be used
as a classifier for binary relation classification.
Args:
in_features (int): The size of the feature dimension of the inputs.
out_features (int): The size of the feature dimension of the output.
Shape:
- x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of
      additional dimensions.
- x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of
additional dimensions.
- Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number
of additional dimensions.
Examples:
>>> batch_size, in_features, out_features = 32, 100, 4
>>> biaffine_attention = BiaffineAttention(in_features, out_features)
>>> x_1 = torch.randn(batch_size, in_features)
>>> x_2 = torch.randn(batch_size, in_features)
>>> output = biaffine_attention(x_1, x_2)
>>> print(output.size())
torch.Size([32, 4])
"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.bilinear = torch.nn.Bilinear(in_features, in_features,
out_features, bias=False)
self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
self.reset_parameters()
def forward(self, x_1, x_2):
return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2),
dim=-1))
def reset_parameters(self):
self.bilinear.reset_parameters()
self.linear.reset_parameters()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SSD300 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/2q/c2qsph7yuvd4qrjdx7qhitc2tkim3pjng4rqgufiypesenwycnhv.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[67108864],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 67108864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 262144) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
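# Reading of the kernel above (hedged): the 3x3 convolution itself runs through
# an extern kernel; this epilogue only folds in the per-channel bias and ReLU,
#   out = torch.relu(conv_out + bias.view(1, -1, 1, 1))
# over a (4, 64, 512, 512) activation (67108864 elements).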
# kernel path: runs/run_shard_4/inductor_cache/se/csey4casydds7ttdva4dpczpio6jwynlr7qsuqonjcwfmq67hxyv.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16777216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (1024*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (1024*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (512 + (2*x0) + (1024*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (513 + (2*x0) + (1024*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
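# Reading of the kernel above (hedged): a 2x2, stride-2 max pool that also
# records each window's argmax position (0..3, int8) for the backward pass --
# the low-memory form of F.max_pool2d(x, 2, 2, return_indices=True).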
# kernel path: runs/run_shard_4/inductor_cache/si/csisjq7rc4algelsz22lsae4qhhrrjvjryyw5k5o6x3fdlimo55m.py
# Topologically Sorted Source Nodes: [conv2d_2, out_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# out_3 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[33554432],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 33554432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 65536) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvcasx345h75eoxksekaeisc7iaf3bqneorw5etqpkzdja2ozs7.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = (xindex // 128)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (256 + (2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (257 + (2*x0) + (512*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pn/cpnor5ydof7dlspqdxdhkrhf2auj7pppdumfestnp6t2dvc7ahdp.py
# Topologically Sorted Source Nodes: [conv2d_4, out_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# out_6 => relu_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16777216],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16777216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16384) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yg/cygiwnm4ri26idrrwplrrcwdugludlchq2iib6x7f5lgij24xv3q.py
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_9 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + (2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (129 + (2*x0) + (256*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ro/cro7juuw5xd4di6yakssncsxdhnpfutfkymieevyezfopo5vi5f2.py
# Topologically Sorted Source Nodes: [conv2d_7, out_10], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_7 => convolution_7
# out_10 => relu_7
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
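# --- Illustrative reference (not part of the generated code) ---------------
# The convolution itself runs through extern_kernels.convolution below; this
# pointwise kernel only fuses the epilogue, adding the per-channel bias and
# applying ReLU in place on the conv output. A minimal eager sketch:
def _reference_bias_relu_epilogue(conv_out, bias):
    import torch
    # conv_out: (N, C, H, W) from a bias-free convolution; bias: (C,)
    return torch.relu_(conv_out + bias.view(1, -1, 1, 1))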
# kernel path: runs/run_shard_4/inductor_cache/27/c27dahr6gu73agvkm5pgjug2pbakmm76uviwrqiqcnpmtijfjx7c.py
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_13 => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rz/crzaczqmdz32jx3wlam76xlof7bkrj4sqcvs2mxm2pldktqwxkjt.py
# Topologically Sorted Source Nodes: [conv2d_10, out_14], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# out_14 => relu_10
# Graph fragment:
# %convolution_10 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_10 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_10,), kwargs = {})
triton_poi_fused_convolution_relu_8 = async_compile.triton('triton_poi_fused_convolution_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ct/cctewtzbghhtqagpkqkvir7v3nfuy5ixuei5d65icnryikadosqc.py
# Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_17 => getitem_8, getitem_9
# Graph fragment:
# %getitem_8 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 0), kwargs = {})
# %getitem_9 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_4, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_9 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2097152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x4 = xindex
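    # 3x3, stride-1 max pool with padding 1 over a 32x32 map (output is also
    # 32x32). The comparisons below build in-bounds masks for each of the
    # nine window taps; out-of-bounds taps load -inf so they never win.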
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-33) + x4), tmp10, other=float("-inf"))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-32) + x4), tmp16, other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-31) + x4), tmp23, other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + x4), tmp30, other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (x4), tmp33, other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36, other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + x4), tmp43, other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + x4), tmp46, other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + x4), tmp49, other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x4), tmp51, None)
tl.store(out_ptr1 + (x4), tmp76, None)
''', device_str='cuda')
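# --- Illustrative reference (not part of the generated code) ---------------
# Eager-mode equivalent of the kernel above: a 3x3 max pool with stride 1 and
# padding 1, which leaves the spatial size unchanged (the pool5 variant used
# by SSD-style VGG backbones).
def _reference_max_pool3x3_s1_p1(x):
    import torch.nn.functional as F
    return F.max_pool2d(x, kernel_size=3, stride=1, padding=1)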
# kernel path: runs/run_shard_4/inductor_cache/6k/c6k6gsglrybvjyfonqtp54l2icmsufqa67hpnv3btr4543ox255t.py
# Topologically Sorted Source Nodes: [conv2d_13, out_18], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# out_18 => relu_13
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_8, %primals_28, %primals_29, [1, 1], [6, 6], [6, 6], False, [0, 0], 1), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_13,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4194304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 1024
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
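# --- Illustrative reference (not part of the generated code) ---------------
# The graph fragment above convolves with padding=(6, 6) and dilation=(6, 6):
# an atrous 3x3 convolution (512 -> 1024 channels, matching primals_28's
# shape) that keeps the spatial size while enlarging the receptive field.
# A minimal eager sketch:
def _reference_atrous_conv_relu(x, weight, bias):
    import torch.nn.functional as F
    # weight: (1024, 512, 3, 3), bias: (1024,)
    return F.relu(F.conv2d(x, weight, bias, stride=1, padding=6, dilation=6))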
# kernel path: runs/run_shard_4/inductor_cache/g6/cg6dnpxzqufsxykijivl4wos4pzjcbbtairqgnptitj2vdjgyiey.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, norm], Original ATen: [aten.pow, aten.sum, aten.sqrt]
# Source node to ATen node mapping:
# norm => sqrt
# pow_1 => pow_1
# sum_1 => sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu_9, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
triton_red_fused_pow_sqrt_sum_11 = async_compile.triton('triton_red_fused_pow_sqrt_sum_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[16384, 512],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_pow_sqrt_sum_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_pow_sqrt_sum_11(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 16384
rnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = (xindex // 4096)
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
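    # Loop reduction: each x index is one (batch, spatial) location; the loop
    # walks the 512 channels (rnumel) in RBLOCK-sized chunks, accumulating
    # sum(x^2) in _tmp3, and the sqrt below yields the channel-wise L2 norm.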
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (2097152*x1)), rmask, eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = libdevice.sqrt(tmp3)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x3), tmp5, None)
''', device_str='cuda')
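# --- Illustrative reference (not part of the generated code) ---------------
# Eager-mode equivalent of the reduction above: the channel-wise L2 norm of a
# (N, 512, H, W) feature map, keeping the channel dim for broadcasting.
def _reference_channelwise_l2_norm(x):
    return x.pow(2).sum(dim=1, keepdim=True).sqrt()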
# kernel path: runs/run_shard_4/inductor_cache/bn/cbnctktjgp7t3nzk7cbjdwatnjesdbubsp42k5hmnarqp4wy6aos.py
# Topologically Sorted Source Nodes: [conv4_3_feats, conv4_3_feats_1], Original ATen: [aten.div, aten.mul]
# Source node to ATen node mapping:
# conv4_3_feats => div
# conv4_3_feats_1 => mul
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%relu_9, %sqrt), kwargs = {})
# %mul : [num_users=3] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_32), kwargs = {})
triton_poi_fused_div_mul_12 = async_compile.triton('triton_poi_fused_div_mul_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8388608],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_mul_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8388608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 2097152)
x1 = (xindex // 4096) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x3), tmp2, None)
tl.store(out_ptr1 + (x3), tmp4, None)
''', device_str='cuda')
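# --- Illustrative reference (not part of the generated code) ---------------
# Together with the norm reduction above, this implements an SSD-style L2Norm
# layer on the conv4_3 feature map: normalize each spatial location across
# channels, then rescale by a learned per-channel factor (primals_32 has
# shape (1, 512, 1, 1)). A minimal eager sketch:
def _reference_l2norm_rescale(x, scale):
    norm = x.pow(2).sum(dim=1, keepdim=True).sqrt()
    return x / norm * scale  # scale broadcasts over batch and spatial dims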
# kernel path: runs/run_shard_4/inductor_cache/7e/c7eo6nf5i4jbfcbm6repz4vmeacyjdvnhnob55afz6cmr27ssfpf.py
# Topologically Sorted Source Nodes: [conv2d_15, out_19], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_15 => convolution_15
# out_19 => relu_15
# Graph fragment:
# %convolution_15 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_14, %primals_33, %primals_34, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_15,), kwargs = {})
triton_poi_fused_convolution_relu_13 = async_compile.triton('triton_poi_fused_convolution_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/os/cosszfrjynxxkwdsxxfvdhcxozstp3jmgtlqb5zwrbcmgiswrqd3.py
# Topologically Sorted Source Nodes: [conv2d_16, out_20], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_16 => convolution_16
# out_20 => relu_16
# Graph fragment:
# %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_15, %primals_35, %primals_36, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_16 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_16,), kwargs = {})
triton_poi_fused_convolution_relu_14 = async_compile.triton('triton_poi_fused_convolution_relu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 512
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4q/c4qi5rxcv3r3wq6y4cvvf3g2jgztsnqzhvjd624hhs7nn3zfyrza.py
# Topologically Sorted Source Nodes: [conv2d_17, out_21], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_17 => convolution_17
# out_21 => relu_17
# Graph fragment:
# %convolution_17 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_16, %primals_37, %primals_38, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_17 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_17,), kwargs = {})
triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nw/cnwta4czjivsbztus2tqw6ksxgwb53lhn4haikmufrci7ezow4lo.py
# Topologically Sorted Source Nodes: [conv2d_18, out_22], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_18 => convolution_18
# out_22 => relu_18
# Graph fragment:
# %convolution_18 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_17, %primals_39, %primals_40, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_18 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_18,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dy/cdyqtsyq3zalq6uxljpp7l7awgppvbql7xysw4zlqyrrtqm73a7t.py
# Topologically Sorted Source Nodes: [conv2d_19, out_23], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_19 => convolution_19
# out_23 => relu_19
# Graph fragment:
# %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_18, %primals_41, %primals_42, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_19 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_19,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fg/cfgcuo4oirqbbwiyditzzmzwst7ym5zfqol5vhilmjoswdttpouj.py
# Topologically Sorted Source Nodes: [conv2d_20, out_24], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_20 => convolution_20
# out_24 => relu_20
# Graph fragment:
# %convolution_20 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_19, %primals_43, %primals_44, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_20 : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_20,), kwargs = {})
triton_poi_fused_convolution_relu_18 = async_compile.triton('triton_poi_fused_convolution_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 36) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/tz/ctzm62zmq4eeli7oqdvyfsjqefvgdi2gl2schefhtdg77ra6tgac.py
# Topologically Sorted Source Nodes: [conv2d_21, out_25], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_21 => convolution_21
# out_25 => relu_21
# Graph fragment:
# %convolution_21 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_20, %primals_45, %primals_46, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_21 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_21,), kwargs = {})
triton_poi_fused_convolution_relu_19 = async_compile.triton('triton_poi_fused_convolution_relu_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 36) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ss/csswcsc3cundvg6yebux77yizbxo3zagcavuqq5eppgqt4uhsq55.py
# Topologically Sorted Source Nodes: [conv2d_22, conv11_2_feats], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv11_2_feats => relu_22
# conv2d_22 => convolution_22
# Graph fragment:
# %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_21, %primals_47, %primals_48, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_22 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_22,), kwargs = {})
triton_poi_fused_convolution_relu_20 = async_compile.triton('triton_poi_fused_convolution_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cv/ccvahx445gtqwoibtu6zmqasjrfl7qfkuzhnrc4afyoqfxmjtlbc.py
# Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# locs => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1, %view_2, %view_3, %view_4, %view_5], 1), kwargs = {})
triton_poi_fused_cat_21 = async_compile.triton('triton_poi_fused_cat_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 394496
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 24656
x0 = xindex % 4
x2 = (xindex // 98624)
x3 = xindex
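    # x1 runs over 24656 concatenated rows; the thresholds below (16384,
    # 22528, 24064, 24448, 24592, 24656) are cumulative row counts of the six
    # per-scale prediction tensors, so each branch loads from the tensor that
    # owns the current row.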
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16384, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4096*((x0 + (4*x1)) % 16)) + (65536*(((x0 + (4*x1) + (65536*x2)) // 65536) % 4)) + (((x0 + (4*x1)) // 16) % 4096)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + ((x0 + (4*x1)) % 16), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 22528, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + ((1024*((x0 + (4*((-16384) + x1))) % 24)) + (24576*(((x0 + (4*((-16384) + x1)) + (24576*x2)) // 24576) % 4)) + (((x0 + (4*((-16384) + x1))) // 24) % 1024)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr3 + ((x0 + (4*((-16384) + x1))) % 24), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 24064, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + ((256*((x0 + (4*((-22528) + x1))) % 24)) + (6144*(((x0 + (4*((-22528) + x1)) + (6144*x2)) // 6144) % 4)) + (((x0 + (4*((-22528) + x1))) // 24) % 256)), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tl.load(in_ptr5 + ((x0 + (4*((-22528) + x1))) % 24), tmp22 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 24448, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tmp28 & tmp30
tmp32 = tl.load(in_ptr6 + ((64*((x0 + (4*((-24064) + x1))) % 24)) + (1536*(((x0 + (4*((-24064) + x1)) + (1536*x2)) // 1536) % 4)) + (((x0 + (4*((-24064) + x1))) // 24) % 64)), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr7 + ((x0 + (4*((-24064) + x1))) % 24), tmp31 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp31, tmp34, tmp35)
tmp37 = tmp0 >= tmp29
tmp38 = tl.full([1], 24592, tl.int64)
tmp39 = tmp0 < tmp38
tmp40 = tmp37 & tmp39
tmp41 = tl.load(in_ptr8 + ((36*((x0 + (4*((-24448) + x1))) % 16)) + (576*(((x0 + (4*((-24448) + x1)) + (576*x2)) // 576) % 4)) + (((x0 + (4*((-24448) + x1))) // 16) % 36)), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr9 + ((x0 + (4*((-24448) + x1))) % 16), tmp40 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp41 + tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp40, tmp43, tmp44)
tmp46 = tmp0 >= tmp38
tmp47 = tl.full([1], 24656, tl.int64)
tmp48 = tmp0 < tmp47
tmp49 = tl.load(in_ptr10 + ((16*((x0 + (4*((-24592) + x1))) % 16)) + (256*(((x0 + (4*((-24592) + x1)) + (256*x2)) // 256) % 4)) + (((x0 + (4*((-24592) + x1))) // 16) % 16)), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp50 = tl.load(in_ptr11 + ((x0 + (4*((-24592) + x1))) % 16), tmp46 & xmask, eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp46, tmp51, tmp52)
tmp54 = tl.where(tmp40, tmp45, tmp53)
tmp55 = tl.where(tmp31, tmp36, tmp54)
tmp56 = tl.where(tmp22, tmp27, tmp55)
tmp57 = tl.where(tmp13, tmp18, tmp56)
tmp58 = tl.where(tmp4, tmp9, tmp57)
tl.store(out_ptr0 + (x3), tmp58, xmask)
''', device_str='cuda')
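# --- Illustrative reference (not part of the generated code) ---------------
# Eager-mode shape of the concatenation above: each per-scale localization
# head yields (N, 4 * boxes_per_cell, H, W); permuting to channels-last and
# flattening gives (N, H * W * boxes_per_cell, 4), and the six scales are
# concatenated along dim 1 (16384 + 6144 + 1536 + 384 + 144 + 64 = 24656 rows
# here, consistent with 4/6/6/6/4/4 boxes per cell). A sketch over a
# hypothetical list `loc_maps` of head outputs:
def _reference_cat_locs(loc_maps):
    import torch
    views = [m.permute(0, 2, 3, 1).contiguous().view(m.size(0), -1, 4)
             for m in loc_maps]
    return torch.cat(views, dim=1)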
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 3, 512, 512), (786432, 262144, 512, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128, ), (1, ))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512, ), (1, ))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (1024, ), (1, ))
assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_31, (1024, ), (1, ))
assert_size_stride(primals_32, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_34, (256, ), (1, ))
assert_size_stride(primals_35, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_36, (512, ), (1, ))
assert_size_stride(primals_37, (128, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_38, (128, ), (1, ))
assert_size_stride(primals_39, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_40, (256, ), (1, ))
assert_size_stride(primals_41, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_42, (128, ), (1, ))
assert_size_stride(primals_43, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_44, (256, ), (1, ))
assert_size_stride(primals_45, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_46, (128, ), (1, ))
assert_size_stride(primals_47, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_48, (256, ), (1, ))
assert_size_stride(primals_49, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_50, (16, ), (1, ))
assert_size_stride(primals_51, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_52, (24, ), (1, ))
assert_size_stride(primals_53, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_54, (24, ), (1, ))
assert_size_stride(primals_55, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_56, (24, ), (1, ))
assert_size_stride(primals_57, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_58, (16, ), (1, ))
assert_size_stride(primals_59, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_60, (16, ), (1, ))
assert_size_stride(primals_61, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_62, (16, ), (1, ))
assert_size_stride(primals_63, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_64, (24, ), (1, ))
assert_size_stride(primals_65, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_66, (24, ), (1, ))
assert_size_stride(primals_67, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_68, (24, ), (1, ))
assert_size_stride(primals_69, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_70, (16, ), (1, ))
assert_size_stride(primals_71, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_72, (16, ), (1, ))
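    # Note (illustrative): each assert_size_stride call guards the contract
    # the kernels above were compiled against, checking the tensor's exact
    # shape *and* stride tuple, roughly:
    #     assert tuple(t.shape) == size and tuple(t.stride()) == stride
    # A mismatch would invalidate the hard-coded indexing in the Triton
    # kernels, so the guard fails fast instead.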
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 512, 512), (16777216, 262144, 512, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 67108864, grid=grid(67108864), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 512, 512), (16777216, 262144, 512, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 67108864, grid=grid(67108864), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf3, buf4, buf5, 16777216, grid=grid(16777216), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 256, 256), (8388608, 65536, 256, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf7, primals_7, 33554432, grid=grid(33554432), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 256, 256), (8388608, 65536, 256, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, out_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf9, primals_9, 33554432, grid=grid(33554432), stream=stream0)
del primals_9
buf10 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf9, buf10, buf11, 8388608, grid=grid(8388608), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 128, 128), (4194304, 16384, 128, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_4, out_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf13, primals_11, 16777216, grid=grid(16777216), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 128, 128), (4194304, 16384, 128, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, out_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf15, primals_13, 16777216, grid=grid(16777216), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 128, 128), (4194304, 16384, 128, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv2d_6, out_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf17, primals_15, 16777216, grid=grid(16777216), stream=stream0)
del primals_15
buf18 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.float32)
buf19 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf17, buf18, buf19, 4194304, grid=grid(4194304), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 512, 64, 64), (2097152, 4096, 64, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, out_10], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf21, primals_17, 8388608, grid=grid(8388608), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 64, 64), (2097152, 4096, 64, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv2d_8, out_11], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf23, primals_19, 8388608, grid=grid(8388608), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 64, 64), (2097152, 4096, 64, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, out_12], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf25, primals_21, 8388608, grid=grid(8388608), stream=stream0)
del primals_21
buf26 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32)
buf27 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf25, buf26, buf27, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 32, 32), (524288, 1024, 32, 1))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_10, out_14], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf29, primals_23, 2097152, grid=grid(2097152), stream=stream0)
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 512, 32, 32), (524288, 1024, 32, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, out_15], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf31, primals_25, 2097152, grid=grid(2097152), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 512, 32, 32), (524288, 1024, 32, 1))
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [conv2d_12, out_16], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_8.run(buf33, primals_27, 2097152, grid=grid(2097152), stream=stream0)
del primals_27
buf34 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.float32)
buf35 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_9.run(buf33, buf34, buf35, 2097152, grid=grid(2097152), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf36 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(6, 6), dilation=(6, 6), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 1024, 32, 32), (1048576, 1024, 32, 1))
buf37 = buf36; del buf36 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, out_18], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf37, primals_29, 4194304, grid=grid(4194304), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 1024, 32, 32), (1048576, 1024, 32, 1))
buf39 = buf38; del buf38 # reuse
# Topologically Sorted Source Nodes: [conv2d_14, conv7_feats], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf39, primals_31, 4194304, grid=grid(4194304), stream=stream0)
del primals_31
buf40 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf40 # reuse
# Topologically Sorted Source Nodes: [pow_1, sum_1, norm], Original ATen: [aten.pow, aten.sum, aten.sqrt]
triton_red_fused_pow_sqrt_sum_11.run(buf41, buf25, 16384, 512, grid=grid(16384), stream=stream0)
buf42 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32)
buf43 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv4_3_feats, conv4_3_feats_1], Original ATen: [aten.div, aten.mul]
triton_poi_fused_div_mul_12.run(buf25, buf41, primals_32, buf42, buf43, 8388608, grid=grid(8388608), stream=stream0)
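        # Note (added for clarity): buf41 holds the per-position L2 norm of conv4_3's
        # 512 channels; the fused kernel above divides conv4_3_feats by it (buf42,
        # kept for the backward pass) and scales the result by the learned per-channel
        # rescale_factors (primals_32, initialized to 20 in SSD300.__init__),
        # producing buf43.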
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf44 = extern_kernels.convolution(buf39, primals_33, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 256, 32, 32), (262144, 1024, 32, 1))
buf45 = buf44; del buf44 # reuse
# Topologically Sorted Source Nodes: [conv2d_15, out_19], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_13.run(buf45, primals_34, 1048576, grid=grid(1048576), stream=stream0)
del primals_34
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_35, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 16, 16), (131072, 256, 16, 1))
buf47 = buf46; del buf46 # reuse
# Topologically Sorted Source Nodes: [conv2d_16, out_20], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_14.run(buf47, primals_36, 524288, grid=grid(524288), stream=stream0)
del primals_36
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_37, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1))
buf49 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [conv2d_17, out_21], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf49, primals_38, 131072, grid=grid(131072), stream=stream0)
del primals_38
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf50 = extern_kernels.convolution(buf49, primals_39, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 256, 8, 8), (16384, 64, 8, 1))
buf51 = buf50; del buf50 # reuse
# Topologically Sorted Source Nodes: [conv2d_18, out_22], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf51, primals_40, 65536, grid=grid(65536), stream=stream0)
del primals_40
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf52 = extern_kernels.convolution(buf51, primals_41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 128, 8, 8), (8192, 64, 8, 1))
buf53 = buf52; del buf52 # reuse
# Topologically Sorted Source Nodes: [conv2d_19, out_23], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf53, primals_42, 32768, grid=grid(32768), stream=stream0)
del primals_42
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_43, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 256, 6, 6), (9216, 36, 6, 1))
buf55 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [conv2d_20, out_24], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_18.run(buf55, primals_44, 36864, grid=grid(36864), stream=stream0)
del primals_44
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_45, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 6, 6), (4608, 36, 6, 1))
buf57 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [conv2d_21, out_25], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_19.run(buf57, primals_46, 18432, grid=grid(18432), stream=stream0)
del primals_46
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf58 = extern_kernels.convolution(buf57, primals_47, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 256, 4, 4), (4096, 16, 4, 1))
buf59 = buf58; del buf58 # reuse
# Topologically Sorted Source Nodes: [conv2d_22, conv11_2_feats], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_20.run(buf59, primals_48, 16384, grid=grid(16384), stream=stream0)
del primals_48
# Topologically Sorted Source Nodes: [l_conv4_3], Original ATen: [aten.convolution]
buf60 = extern_kernels.convolution(buf43, primals_49, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [l_conv7], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf39, primals_51, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 24, 32, 32), (24576, 1024, 32, 1))
# Topologically Sorted Source Nodes: [l_conv8_2], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf47, primals_53, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 24, 16, 16), (6144, 256, 16, 1))
# Topologically Sorted Source Nodes: [l_conv9_2], Original ATen: [aten.convolution]
buf63 = extern_kernels.convolution(buf51, primals_55, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 24, 8, 8), (1536, 64, 8, 1))
# Topologically Sorted Source Nodes: [l_conv10_2], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf55, primals_57, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 16, 6, 6), (576, 36, 6, 1))
# Topologically Sorted Source Nodes: [l_conv11_2], Original ATen: [aten.convolution]
buf65 = extern_kernels.convolution(buf59, primals_59, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 16, 4, 4), (256, 16, 4, 1))
# Topologically Sorted Source Nodes: [c_conv4_3], Original ATen: [aten.convolution]
buf66 = extern_kernels.convolution(buf43, primals_61, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 16, 64, 64), (65536, 4096, 64, 1))
# Topologically Sorted Source Nodes: [c_conv7], Original ATen: [aten.convolution]
buf67 = extern_kernels.convolution(buf39, primals_63, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 24, 32, 32), (24576, 1024, 32, 1))
# Topologically Sorted Source Nodes: [c_conv8_2], Original ATen: [aten.convolution]
buf68 = extern_kernels.convolution(buf47, primals_65, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 24, 16, 16), (6144, 256, 16, 1))
# Topologically Sorted Source Nodes: [c_conv9_2], Original ATen: [aten.convolution]
buf69 = extern_kernels.convolution(buf51, primals_67, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 24, 8, 8), (1536, 64, 8, 1))
# Topologically Sorted Source Nodes: [c_conv10_2], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf55, primals_69, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 16, 6, 6), (576, 36, 6, 1))
# Topologically Sorted Source Nodes: [c_conv11_2], Original ATen: [aten.convolution]
buf71 = extern_kernels.convolution(buf59, primals_71, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf71, (4, 16, 4, 4), (256, 16, 4, 1))
buf72 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [locs], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf60, primals_50, buf61, primals_52, buf62, primals_54, buf63, primals_56, buf64, primals_58, buf65, primals_60, buf72, 394496, grid=grid(394496), stream=stream0)
del buf60
del buf61
del buf62
del buf63
del buf64
del buf65
del primals_50
del primals_52
del primals_54
del primals_56
del primals_58
del primals_60
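        # Note (added for clarity): triton_poi_fused_cat_21 fuses the bias add, the
        # (N, C, H, W) -> (N, H, W, C) permute, and the flatten-and-concatenate of all
        # six localization heads into a single (4, 24656, 4) tensor; 24656 is the
        # prior count for a 512x512 input (see the note after create_prior_boxes
        # below).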
buf73 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [classes_scores], Original ATen: [aten.cat]
triton_poi_fused_cat_21.run(buf66, primals_62, buf67, primals_64, buf68, primals_66, buf69, primals_68, buf70, primals_70, buf71, primals_72, buf73, 394496, grid=grid(394496), stream=stream0)
del buf66
del buf67
del buf68
del buf69
del buf70
del buf71
del primals_62
del primals_64
del primals_66
del primals_68
del primals_70
del primals_72
return (buf72, buf73, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_33, primals_35, primals_37, primals_39, primals_41, primals_43, primals_45, primals_47, primals_49, primals_51, primals_53, primals_55, primals_57, primals_59, primals_61, primals_63, primals_65, primals_67, primals_69, primals_71, buf1, buf3, buf4, buf5, buf7, buf9, buf10, buf11, buf13, buf15, buf17, buf18, buf19, buf21, buf23, buf25, buf26, buf27, buf29, buf31, buf33, buf34, buf35, buf37, buf39, buf41, buf42, buf43, buf45, buf47, buf49, buf51, buf53, buf55, buf57, buf59, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 512, 512), (786432, 262144, 512, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((1024, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((1024, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((1, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((128, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((16, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((24, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((24, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((24, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((16, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torchvision
from torch import nn
import torch.nn.functional as F
from math import sqrt
from itertools import product as product
import torch.optim
import torch.utils.data
def decimate(tensor, m):
"""
Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.
This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.
:param tensor: tensor to be decimated
:param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
:return: decimated tensor
"""
assert tensor.dim() == len(m)
for d in range(tensor.dim()):
if m[d] is not None:
tensor = tensor.index_select(dim=d, index=torch.arange(start=0,
end=tensor.size(d), step=m[d]).long())
return tensor
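# Minimal sketch of what decimate does (hypothetical tensor, not from the source):
# keeping every 4th output channel and every 3rd spatial tap turns the reshaped fc6
# weight (4096, 512, 7, 7) into the conv6 weight (1024, 512, 3, 3) seen as
# primals_28 in the compiled graph above.
#
#   w = torch.randn(4096, 512, 7, 7)
#   decimate(w, m=[4, None, 3, 3]).shape  # -> torch.Size([1024, 512, 3, 3])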
def cxcy_to_xy(cxcy):
"""
Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max).
:param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)
:return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)
"""
return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:,
2:] / 2], 1)
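# Illustrative example (values chosen for demonstration): a box centered at
# (0.5, 0.5) with width 0.2 and height 0.4 maps to boundary coordinates
# (0.4, 0.3, 0.6, 0.7):
#
#   cxcy_to_xy(torch.tensor([[0.5, 0.5, 0.2, 0.4]]))
#   # -> tensor([[0.4000, 0.3000, 0.6000, 0.7000]])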
def find_intersection(set_1, set_2):
"""
Find the intersection of every box combination between two sets of boxes that are in boundary coordinates.
:param set_1: set 1, a tensor of dimensions (n1, 4)
:param set_2: set 2, a tensor of dimensions (n2, 4)
:return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
"""
lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].
unsqueeze(0))
upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].
unsqueeze(0))
intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0)
return intersection_dims[:, :, 0] * intersection_dims[:, :, 1]
def find_jaccard_overlap(set_1, set_2):
"""
Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates.
:param set_1: set 1, a tensor of dimensions (n1, 4)
:param set_2: set 2, a tensor of dimensions (n2, 4)
:return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
"""
intersection = find_intersection(set_1, set_2)
areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1])
areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1])
union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection
return intersection / union
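# Worked example (illustrative values): boxes [0, 0, 0.5, 0.5] and
# [0.25, 0.25, 0.75, 0.75] intersect over a 0.25 x 0.25 patch, so
# IoU = 0.0625 / (0.25 + 0.25 - 0.0625) ~= 0.1429:
#
#   a = torch.tensor([[0.0, 0.0, 0.5, 0.5]])
#   b = torch.tensor([[0.25, 0.25, 0.75, 0.75]])
#   find_jaccard_overlap(a, b)  # -> tensor([[0.1429]])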
def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):
"""
    Decode bounding box coordinates predicted by the model, since they are encoded in the offset form produced by 'cxcy_to_gcxgcy' (see utils.py).
    They are decoded into center-size coordinates.
    This is the inverse of that encoding function.
:param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4)
:param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4)
:return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4)
"""
return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy
[:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1)
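# The divisors 10 and 5 undo the usual SSD "variance" scaling (0.1 for centers,
# 0.2 for sizes) applied when targets were encoded. Decoding a zero offset therefore
# recovers the prior itself, e.g. (assumed values for illustration):
#
#   prior = torch.tensor([[0.5, 0.5, 0.2, 0.2]])
#   gcxgcy_to_cxcy(torch.zeros(1, 4), prior)  # -> tensor([[0.5, 0.5, 0.2, 0.2]])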
class VGGBase(nn.Module):
"""
VGG base convolutions to produce lower-level feature maps.
"""
def __init__(self):
super(VGGBase, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
self.load_pretrained_layers()
def forward(self, image):
"""
Forward propagation.
:param image: images, a tensor of dimensions (N, 3, 300, 300)
:return: lower-level feature maps conv4_3 and conv7
"""
out = F.relu(self.conv1_1(image))
out = F.relu(self.conv1_2(out))
out = self.pool1(out)
out = F.relu(self.conv2_1(out))
out = F.relu(self.conv2_2(out))
out = self.pool2(out)
out = F.relu(self.conv3_1(out))
out = F.relu(self.conv3_2(out))
out = F.relu(self.conv3_3(out))
out = self.pool3(out)
out = F.relu(self.conv4_1(out))
out = F.relu(self.conv4_2(out))
out = F.relu(self.conv4_3(out))
conv4_3_feats = out
out = self.pool4(out)
out = F.relu(self.conv5_1(out))
out = F.relu(self.conv5_2(out))
out = F.relu(self.conv5_3(out))
out = self.pool5(out)
out = F.relu(self.conv6(out))
conv7_feats = F.relu(self.conv7(out))
return conv4_3_feats, conv7_feats
def load_pretrained_layers(self):
"""
As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and conv7 layers.
Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
"""
state_dict = self.state_dict()
param_names = list(state_dict.keys())
pretrained_state_dict = torchvision.models.vgg16(pretrained=True
).state_dict()
pretrained_param_names = list(pretrained_state_dict.keys())
for i, param in enumerate(param_names[:-4]):
state_dict[param] = pretrained_state_dict[pretrained_param_names[i]
]
conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(
4096, 512, 7, 7)
conv_fc6_bias = pretrained_state_dict['classifier.0.bias']
state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None,
3, 3])
state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])
conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(
4096, 4096, 1, 1)
conv_fc7_bias = pretrained_state_dict['classifier.3.bias']
state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4,
None, None])
state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])
self.load_state_dict(state_dict)
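# Decimation check against the compiled graph above: fc6's reshaped weight
# (4096, 512, 7, 7) decimated with m=[4, None, 3, 3] gives (1024, 512, 3, 3)
# (primals_28), and fc7's (4096, 4096, 1, 1) with m=[4, 4, None, None] gives
# (1024, 1024, 1, 1) (primals_30).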
class AuxiliaryConvolutions(nn.Module):
"""
Additional convolutions to produce higher-level feature maps.
"""
def __init__(self):
super(AuxiliaryConvolutions, self).__init__()
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv7_feats):
"""
Forward propagation.
:param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
"""
out = F.relu(self.conv8_1(conv7_feats))
out = F.relu(self.conv8_2(out))
conv8_2_feats = out
out = F.relu(self.conv9_1(out))
out = F.relu(self.conv9_2(out))
conv9_2_feats = out
out = F.relu(self.conv10_1(out))
out = F.relu(self.conv10_2(out))
conv10_2_feats = out
out = F.relu(self.conv11_1(out))
conv11_2_feats = F.relu(self.conv11_2(out))
return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
class PredictionConvolutions(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
A high score for 'background' = no object.
"""
def __init__(self, n_classes):
"""
:param n_classes: number of different types of objects
"""
super(PredictionConvolutions, self).__init__()
self.n_classes = n_classes
n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
'conv10_2': 4, 'conv11_2': 4}
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
kernel_size=3, padding=1)
self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=
3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
kernel_size=3, padding=1)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
kernel_size=3, padding=1)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats,
conv9_2_feats, conv10_2_feats, conv11_2_feats):
"""
Forward propagation.
:param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
:param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
:param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
:param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
:param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
batch_size = conv4_3_feats.size(0)
l_conv4_3 = self.loc_conv4_3(conv4_3_feats)
l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous()
l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)
l_conv7 = self.loc_conv7(conv7_feats)
l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()
l_conv7 = l_conv7.view(batch_size, -1, 4)
l_conv8_2 = self.loc_conv8_2(conv8_2_feats)
l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()
l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)
l_conv9_2 = self.loc_conv9_2(conv9_2_feats)
l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()
l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)
l_conv10_2 = self.loc_conv10_2(conv10_2_feats)
l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous()
l_conv10_2 = l_conv10_2.view(batch_size, -1, 4)
l_conv11_2 = self.loc_conv11_2(conv11_2_feats)
l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous()
l_conv11_2 = l_conv11_2.view(batch_size, -1, 4)
c_conv4_3 = self.cl_conv4_3(conv4_3_feats)
c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous()
c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes)
c_conv7 = self.cl_conv7(conv7_feats)
c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous()
c_conv7 = c_conv7.view(batch_size, -1, self.n_classes)
c_conv8_2 = self.cl_conv8_2(conv8_2_feats)
c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous()
c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes)
c_conv9_2 = self.cl_conv9_2(conv9_2_feats)
c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous()
c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes)
c_conv10_2 = self.cl_conv10_2(conv10_2_feats)
c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous()
c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes)
c_conv11_2 = self.cl_conv11_2(conv11_2_feats)
c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous()
c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes)
locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2,
l_conv10_2, l_conv11_2], dim=1)
classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2,
c_conv9_2, c_conv10_2, c_conv11_2], dim=1)
return locs, classes_scores
class SSD300(nn.Module):
"""
The SSD300 network - encapsulates the base VGG network, auxiliary, and prediction convolutions.
"""
def __init__(self, n_classes):
super(SSD300, self).__init__()
self.n_classes = n_classes
self.base = VGGBase()
self.aux_convs = AuxiliaryConvolutions()
self.pred_convs = PredictionConvolutions(n_classes)
self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))
nn.init.constant_(self.rescale_factors, 20)
self.priors_cxcy = self.create_prior_boxes()
def forward(self, image):
"""
Forward propagation.
:param image: images, a tensor of dimensions (N, 3, 300, 300)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
conv4_3_feats, conv7_feats = self.base(image)
norm = conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt()
conv4_3_feats = conv4_3_feats / norm
conv4_3_feats = conv4_3_feats * self.rescale_factors
        conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats = (
            self.aux_convs(conv7_feats))
locs, classes_scores = self.pred_convs(conv4_3_feats, conv7_feats,
conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats)
return locs, classes_scores
def create_prior_boxes(self):
"""
Create the 8732 prior (default) boxes for the SSD300, as defined in the paper.
:return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4)
"""
fmap_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2':
5, 'conv10_2': 3, 'conv11_2': 1}
obj_scales = {'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.375,
'conv9_2': 0.55, 'conv10_2': 0.725, 'conv11_2': 0.9}
aspect_ratios = {'conv4_3': [1.0, 2.0, 0.5], 'conv7': [1.0, 2.0,
3.0, 0.5, 0.333], 'conv8_2': [1.0, 2.0, 3.0, 0.5, 0.333],
'conv9_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv10_2': [1.0, 2.0,
0.5], 'conv11_2': [1.0, 2.0, 0.5]}
fmaps = list(fmap_dims.keys())
prior_boxes = []
for k, fmap in enumerate(fmaps):
for i in range(fmap_dims[fmap]):
for j in range(fmap_dims[fmap]):
cx = (j + 0.5) / fmap_dims[fmap]
cy = (i + 0.5) / fmap_dims[fmap]
for ratio in aspect_ratios[fmap]:
                        prior_boxes.append([cx, cy, obj_scales[fmap] *
                            sqrt(ratio), obj_scales[fmap] / sqrt(ratio)])
if ratio == 1.0:
try:
additional_scale = sqrt(obj_scales[fmap] *
obj_scales[fmaps[k + 1]])
except IndexError:
additional_scale = 1.0
prior_boxes.append([cx, cy, additional_scale,
additional_scale])
prior_boxes = torch.FloatTensor(prior_boxes)
prior_boxes.clamp_(0, 1)
return prior_boxes
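    # Prior-count arithmetic: at the nominal 300x300 input this yields
    # 38^2*4 + 19^2*6 + 10^2*6 + 5^2*6 + 3^2*4 + 1^2*4 = 8732 priors. The compiled
    # trace above runs on 512x512 inputs, where the feature maps are 64/32/16/8/6/4
    # on a side, giving 64^2*4 + 32^2*6 + 16^2*6 + 8^2*6 + 6^2*4 + 4^2*4 = 24656,
    # the second dimension of buf72/buf73.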
def detect_objects(self, predicted_locs, predicted_scores, min_score,
max_overlap, top_k):
"""
        Decipher the 8732 locations and class scores (output of the SSD300) to detect objects.
For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
:param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
:param min_score: minimum threshold for a box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
        :param top_k: if there are a lot of resulting detections across all classes, keep only the top 'k'
:return: detections (boxes, labels, and scores), lists of length batch_size
"""
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
predicted_scores = F.softmax(predicted_scores, dim=2)
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
for i in range(batch_size):
decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i],
self.priors_cxcy))
image_boxes = list()
image_labels = list()
image_scores = list()
_max_scores, _best_label = predicted_scores[i].max(dim=1)
for c in range(1, self.n_classes):
class_scores = predicted_scores[i][:, c]
score_above_min_score = class_scores > min_score
n_above_min_score = score_above_min_score.sum().item()
if n_above_min_score == 0:
continue
class_scores = class_scores[score_above_min_score]
class_decoded_locs = decoded_locs[score_above_min_score]
class_scores, sort_ind = class_scores.sort(dim=0,
descending=True)
class_decoded_locs = class_decoded_locs[sort_ind]
overlap = find_jaccard_overlap(class_decoded_locs,
class_decoded_locs)
suppress = torch.zeros(n_above_min_score, dtype=torch.bool)
for box in range(class_decoded_locs.size(0)):
if suppress[box] == 1:
continue
suppress = suppress | (overlap[box] > max_overlap)
suppress[box] = 0
image_boxes.append(class_decoded_locs[~suppress])
                image_labels.append(torch.LongTensor(
                    (~suppress).sum().item() * [c]))
image_scores.append(class_scores[~suppress])
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0.0, 0.0, 1.0, 1.0]]))
image_labels.append(torch.LongTensor([0]))
image_scores.append(torch.FloatTensor([0.0]))
image_boxes = torch.cat(image_boxes, dim=0)
image_labels = torch.cat(image_labels, dim=0)
image_scores = torch.cat(image_scores, dim=0)
n_objects = image_scores.size(0)
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0,
descending=True)
image_scores = image_scores[:top_k]
image_boxes = image_boxes[sort_ind][:top_k]
image_labels = image_labels[sort_ind][:top_k]
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores
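# NMS sketch of the loop above (descriptive, not a behavior change): with
# max_overlap = 0.5, a box is kept, every not-yet-suppressed box whose IoU with it
# exceeds 0.5 is marked suppressed, and the loop moves to the next surviving box in
# score order; `suppress[box] = 0` un-marks the reference box itself, since its
# self-overlap is 1.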
def get_inputs():
return [torch.rand([4, 3, 512, 512])]
def get_init_inputs():
return [[], {'n_classes': 4}]
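# Hedged usage sketch (assumes a CUDA device and torchvision weights are available;
# the 512x512 input size matches get_inputs() above, so the model returns 24656
# priors rather than the 8732 of the nominal 300x300 configuration):
#
#   model = SSD300(n_classes=4).cuda().eval()
#   locs, scores = model(torch.rand(4, 3, 512, 512, device='cuda'))
#   # locs: (4, 24656, 4), scores: (4, 24656, 4)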
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torchvision
from torch import nn
import torch.nn.functional as F
from math import sqrt
from itertools import product as product
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 262144 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
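# Note (added for clarity): the fused *_relu_* kernels below all follow the same
# pattern. The convolution itself runs through extern_kernels.convolution with
# bias=None, and this elementwise kernel then adds the per-channel bias (x1 indexes
# the channel) and applies ReLU in place.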
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 1024 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 1024 * x1), None,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (512 + 2 * x0 + 1024 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (513 + 2 * x0 + 1024 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
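# Note (added for clarity): 2x2 / stride-2 max pool. The four loads are the window's
# corners, tmp6 is their maximum, and tmp16 encodes the argmax position (0..3) as
# int8 so the backward pass can route gradients; the *_3, *_5, and *_7 variants
# below differ only in their spatial strides.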
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 65536 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 128
x1 = xindex // 128
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 512 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 512 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (256 + 2 * x0 + 512 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (257 + 2 * x0 + 512 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16384 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 256 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 256 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (128 + 2 * x0 + 256 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (129 + 2 * x0 + 256 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_8(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-33 + x4), tmp10, other=float('-inf'))
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-32 + x4), tmp16, other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-31 + x4), tmp23, other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + x4), tmp30, other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + x4, tmp33, other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + x4), tmp36, other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + x4), tmp43, other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + x4), tmp46, other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + x4), tmp49, other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x4, tmp51, None)
tl.store(out_ptr1 + x4, tmp76, None)
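# Note (added for clarity): this variant implements pool5 (3x3 kernel, stride 1,
# padding 1). It takes nine taps with boundary masks (out-of-range positions load
# -inf) and stores int8 argmax codes 0..8, which is why the 32x32 input keeps its
# 32x32 shape.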
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 1024
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_red_fused_pow_sqrt_sum_11(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 4096
x1 = xindex // 4096
_tmp3 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
x3 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 2097152 * x1), rmask,
eviction_policy='evict_first', other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(rmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tmp5 = libdevice.sqrt(tmp3)
tl.debug_barrier()
tl.store(in_out_ptr0 + x3, tmp5, None)
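# Note (added for clarity): reduction kernel for the L2Norm step. For each of the
# 4*64*64 spatial positions it sums the squares of the 512 conv4_3 channels
# (rnumel = 512) and takes the square root, i.e.
# conv4_3_feats.pow(2).sum(dim=1, keepdim=True).sqrt() from SSD300.forward.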
@triton.jit
def triton_poi_fused_div_mul_12(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 2097152
x1 = xindex // 4096 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 / tmp1
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x3, tmp2, None)
tl.store(out_ptr1 + x3, tmp4, None)
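# The remaining conv+ReLU kernels (relu_13 through relu_20) all apply the same
# in-place bias-add + ReLU after an external convolution; only the channel and
# spatial indexing (xindex // stride % channels) changes per auxiliary layer.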
@triton.jit
def triton_poi_fused_convolution_relu_13(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_14(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 512
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_18(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 36 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_19(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 36 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_20(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
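# Fused concatenation of the six prediction-head outputs (conv4_3 ... conv11_2)
# into one (4, 24656, 4) tensor; the per-head bias add and the channels-last
# permute/flatten are folded into the index arithmetic below.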
@triton.jit
def triton_poi_fused_cat_21(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 394496
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 24656
x0 = xindex % 4
x2 = xindex // 98624
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 16384, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4096 * ((x0 + 4 * x1) % 16) + 65536 * ((x0 +
4 * x1 + 65536 * x2) // 65536 % 4) + (x0 + 4 * x1) // 16 % 4096),
tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 4 * x1) % 16, tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 22528, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr2 + (1024 * ((x0 + 4 * (-16384 + x1)) % 24) +
24576 * ((x0 + 4 * (-16384 + x1) + 24576 * x2) // 24576 % 4) + (x0 +
4 * (-16384 + x1)) // 24 % 1024), tmp13 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr3 + (x0 + 4 * (-16384 + x1)) % 24, tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 + tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 24064, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tmp19 & tmp21
tmp23 = tl.load(in_ptr4 + (256 * ((x0 + 4 * (-22528 + x1)) % 24) + 6144 *
((x0 + 4 * (-22528 + x1) + 6144 * x2) // 6144 % 4) + (x0 + 4 * (-
22528 + x1)) // 24 % 256), tmp22 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp24 = tl.load(in_ptr5 + (x0 + 4 * (-22528 + x1)) % 24, tmp22 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = tmp23 + tmp24
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp22, tmp25, tmp26)
tmp28 = tmp0 >= tmp20
tmp29 = tl.full([1], 24448, tl.int64)
tmp30 = tmp0 < tmp29
tmp31 = tmp28 & tmp30
tmp32 = tl.load(in_ptr6 + (64 * ((x0 + 4 * (-24064 + x1)) % 24) + 1536 *
((x0 + 4 * (-24064 + x1) + 1536 * x2) // 1536 % 4) + (x0 + 4 * (-
24064 + x1)) // 24 % 64), tmp31 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tl.load(in_ptr7 + (x0 + 4 * (-24064 + x1)) % 24, tmp31 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 + tmp33
tmp35 = tl.full(tmp34.shape, 0.0, tmp34.dtype)
tmp36 = tl.where(tmp31, tmp34, tmp35)
tmp37 = tmp0 >= tmp29
tmp38 = tl.full([1], 24592, tl.int64)
tmp39 = tmp0 < tmp38
tmp40 = tmp37 & tmp39
tmp41 = tl.load(in_ptr8 + (36 * ((x0 + 4 * (-24448 + x1)) % 16) + 576 *
((x0 + 4 * (-24448 + x1) + 576 * x2) // 576 % 4) + (x0 + 4 * (-
24448 + x1)) // 16 % 36), tmp40 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp42 = tl.load(in_ptr9 + (x0 + 4 * (-24448 + x1)) % 16, tmp40 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp41 + tmp42
tmp44 = tl.full(tmp43.shape, 0.0, tmp43.dtype)
tmp45 = tl.where(tmp40, tmp43, tmp44)
tmp46 = tmp0 >= tmp38
tl.full([1], 24656, tl.int64)
tmp49 = tl.load(in_ptr10 + (16 * ((x0 + 4 * (-24592 + x1)) % 16) + 256 *
((x0 + 4 * (-24592 + x1) + 256 * x2) // 256 % 4) + (x0 + 4 * (-
24592 + x1)) // 16 % 16), tmp46 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp50 = tl.load(in_ptr11 + (x0 + 4 * (-24592 + x1)) % 16, tmp46 & xmask,
eviction_policy='evict_last', other=0.0)
tmp51 = tmp49 + tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp46, tmp51, tmp52)
tmp54 = tl.where(tmp40, tmp45, tmp53)
tmp55 = tl.where(tmp31, tmp36, tmp54)
tmp56 = tl.where(tmp22, tmp27, tmp55)
tmp57 = tl.where(tmp13, tmp18, tmp56)
tmp58 = tl.where(tmp4, tmp9, tmp57)
tl.store(out_ptr0 + x3, tmp58, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72) = args
args.clear()
assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 3, 512, 512), (786432, 262144, 512, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_9, (128,), (1,))
assert_size_stride(primals_10, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (512,), (1,))
assert_size_stride(primals_18, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (1024, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_29, (1024,), (1,))
assert_size_stride(primals_30, (1024, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_31, (1024,), (1,))
assert_size_stride(primals_32, (1, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_33, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_34, (256,), (1,))
assert_size_stride(primals_35, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_36, (512,), (1,))
assert_size_stride(primals_37, (128, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_38, (128,), (1,))
assert_size_stride(primals_39, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_40, (256,), (1,))
assert_size_stride(primals_41, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_42, (128,), (1,))
assert_size_stride(primals_43, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_44, (256,), (1,))
assert_size_stride(primals_45, (128, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_46, (128,), (1,))
assert_size_stride(primals_47, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_48, (256,), (1,))
assert_size_stride(primals_49, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_50, (16,), (1,))
assert_size_stride(primals_51, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_52, (24,), (1,))
assert_size_stride(primals_53, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_54, (24,), (1,))
assert_size_stride(primals_55, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_56, (24,), (1,))
assert_size_stride(primals_57, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_58, (16,), (1,))
assert_size_stride(primals_59, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_60, (16,), (1,))
assert_size_stride(primals_61, (16, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_62, (16,), (1,))
assert_size_stride(primals_63, (24, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_64, (24,), (1,))
assert_size_stride(primals_65, (24, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_66, (24,), (1,))
assert_size_stride(primals_67, (24, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_68, (24,), (1,))
assert_size_stride(primals_69, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_70, (16,), (1,))
assert_size_stride(primals_71, (16, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_72, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 512, 512), (16777216, 262144, 512, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(67108864)](buf1, primals_2,
67108864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 512, 512), (16777216, 262144, 512, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(67108864)](buf3, primals_5,
67108864, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256,
1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 256, 256), (4194304, 65536, 256,
1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(16777216)](buf3,
buf4, buf5, 16777216, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 256, 256), (8388608, 65536, 256, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_2[grid(33554432)](buf7, primals_7,
33554432, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 256, 256), (8388608, 65536, 256, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_2[grid(33554432)](buf9, primals_9,
33554432, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf10 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128,
1), torch.float32)
buf11 = empty_strided_cuda((4, 128, 128, 128), (2097152, 16384, 128,
1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(8388608)](buf9,
buf10, buf11, 8388608, XBLOCK=512, num_warps=8, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 256, 128, 128), (4194304, 16384, 128, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_4[grid(16777216)](buf13,
primals_11, 16777216, XBLOCK=512, num_warps=8, num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 128, 128), (4194304, 16384, 128, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_4[grid(16777216)](buf15,
primals_13, 16777216, XBLOCK=512, num_warps=8, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 256, 128, 128), (4194304, 16384, 128, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_4[grid(16777216)](buf17,
primals_15, 16777216, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf18 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
torch.float32)
buf19 = empty_strided_cuda((4, 256, 64, 64), (1048576, 4096, 64, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(4194304)](buf17,
buf18, buf19, 4194304, XBLOCK=512, num_warps=8, num_stages=1)
buf20 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 512, 64, 64), (2097152, 4096, 64, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_6[grid(8388608)](buf21,
primals_17, 8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_17
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 512, 64, 64), (2097152, 4096, 64, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_6[grid(8388608)](buf23,
primals_19, 8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 512, 64, 64), (2097152, 4096, 64, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_6[grid(8388608)](buf25,
primals_21, 8388608, XBLOCK=512, num_warps=8, num_stages=1)
del primals_21
buf26 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
torch.float32)
buf27 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(2097152)](buf25,
buf26, buf27, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
buf28 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 32, 32), (524288, 1024, 32, 1))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_8[grid(2097152)](buf29,
primals_23, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_23
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 512, 32, 32), (524288, 1024, 32, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_relu_8[grid(2097152)](buf31,
primals_25, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 512, 32, 32), (524288, 1024, 32, 1))
buf33 = buf32
del buf32
triton_poi_fused_convolution_relu_8[grid(2097152)](buf33,
primals_27, 2097152, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_27
buf34 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
torch.float32)
buf35 = empty_strided_cuda((4, 512, 32, 32), (524288, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_9[grid(2097152)](buf33,
buf34, buf35, 2097152, XBLOCK=512, num_warps=8, num_stages=1)
buf36 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1),
padding=(6, 6), dilation=(6, 6), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf36, (4, 1024, 32, 32), (1048576, 1024, 32, 1))
buf37 = buf36
del buf36
triton_poi_fused_convolution_relu_10[grid(4194304)](buf37,
primals_29, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_29
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 1024, 32, 32), (1048576, 1024, 32, 1))
buf39 = buf38
del buf38
triton_poi_fused_convolution_relu_10[grid(4194304)](buf39,
primals_31, 4194304, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf40 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1),
torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf40
triton_red_fused_pow_sqrt_sum_11[grid(16384)](buf41, buf25, 16384,
512, XBLOCK=64, RBLOCK=8, num_warps=4, num_stages=1)
buf42 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
buf43 = empty_strided_cuda((4, 512, 64, 64), (2097152, 4096, 64, 1),
torch.float32)
triton_poi_fused_div_mul_12[grid(8388608)](buf25, buf41, primals_32,
buf42, buf43, 8388608, XBLOCK=1024, num_warps=4, num_stages=1)
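        # conv4_3 rescale path: buf41 holds the per-location L2 norms of buf25,
        # buf42 the normalized features, and buf43 the features scaled by
        # rescale_factors (primals_32); buf43 feeds the conv4_3 heads below.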
buf44 = extern_kernels.convolution(buf39, primals_33, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf44, (4, 256, 32, 32), (262144, 1024, 32, 1))
buf45 = buf44
del buf44
triton_poi_fused_convolution_relu_13[grid(1048576)](buf45,
primals_34, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_34
buf46 = extern_kernels.convolution(buf45, primals_35, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf46, (4, 512, 16, 16), (131072, 256, 16, 1))
buf47 = buf46
del buf46
triton_poi_fused_convolution_relu_14[grid(524288)](buf47,
primals_36, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_36
buf48 = extern_kernels.convolution(buf47, primals_37, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 128, 16, 16), (32768, 256, 16, 1))
buf49 = buf48
del buf48
triton_poi_fused_convolution_relu_15[grid(131072)](buf49,
primals_38, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_38
buf50 = extern_kernels.convolution(buf49, primals_39, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf50, (4, 256, 8, 8), (16384, 64, 8, 1))
buf51 = buf50
del buf50
triton_poi_fused_convolution_relu_16[grid(65536)](buf51, primals_40,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_40
buf52 = extern_kernels.convolution(buf51, primals_41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf52, (4, 128, 8, 8), (8192, 64, 8, 1))
buf53 = buf52
del buf52
triton_poi_fused_convolution_relu_17[grid(32768)](buf53, primals_42,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_42
buf54 = extern_kernels.convolution(buf53, primals_43, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 256, 6, 6), (9216, 36, 6, 1))
buf55 = buf54
del buf54
triton_poi_fused_convolution_relu_18[grid(36864)](buf55, primals_44,
36864, XBLOCK=512, num_warps=4, num_stages=1)
del primals_44
buf56 = extern_kernels.convolution(buf55, primals_45, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 6, 6), (4608, 36, 6, 1))
buf57 = buf56
del buf56
triton_poi_fused_convolution_relu_19[grid(18432)](buf57, primals_46,
18432, XBLOCK=128, num_warps=4, num_stages=1)
del primals_46
buf58 = extern_kernels.convolution(buf57, primals_47, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf58, (4, 256, 4, 4), (4096, 16, 4, 1))
buf59 = buf58
del buf58
triton_poi_fused_convolution_relu_20[grid(16384)](buf59, primals_48,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_48
buf60 = extern_kernels.convolution(buf43, primals_49, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf60, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf61 = extern_kernels.convolution(buf39, primals_51, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 24, 32, 32), (24576, 1024, 32, 1))
buf62 = extern_kernels.convolution(buf47, primals_53, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf62, (4, 24, 16, 16), (6144, 256, 16, 1))
buf63 = extern_kernels.convolution(buf51, primals_55, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf63, (4, 24, 8, 8), (1536, 64, 8, 1))
buf64 = extern_kernels.convolution(buf55, primals_57, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 16, 6, 6), (576, 36, 6, 1))
buf65 = extern_kernels.convolution(buf59, primals_59, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 16, 4, 4), (256, 16, 4, 1))
buf66 = extern_kernels.convolution(buf43, primals_61, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf66, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf67 = extern_kernels.convolution(buf39, primals_63, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 24, 32, 32), (24576, 1024, 32, 1))
buf68 = extern_kernels.convolution(buf47, primals_65, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf68, (4, 24, 16, 16), (6144, 256, 16, 1))
buf69 = extern_kernels.convolution(buf51, primals_67, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf69, (4, 24, 8, 8), (1536, 64, 8, 1))
buf70 = extern_kernels.convolution(buf55, primals_69, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 16, 6, 6), (576, 36, 6, 1))
buf71 = extern_kernels.convolution(buf59, primals_71, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf71, (4, 16, 4, 4), (256, 16, 4, 1))
buf72 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32)
triton_poi_fused_cat_21[grid(394496)](buf60, primals_50, buf61,
primals_52, buf62, primals_54, buf63, primals_56, buf64,
primals_58, buf65, primals_60, buf72, 394496, XBLOCK=512,
num_warps=8, num_stages=1)
del buf60
del buf61
del buf62
del buf63
del buf64
del buf65
del primals_50
del primals_52
del primals_54
del primals_56
del primals_58
del primals_60
buf73 = empty_strided_cuda((4, 24656, 4), (98624, 4, 1), torch.float32)
triton_poi_fused_cat_21[grid(394496)](buf66, primals_62, buf67,
primals_64, buf68, primals_66, buf69, primals_68, buf70,
primals_70, buf71, primals_72, buf73, 394496, XBLOCK=512,
num_warps=8, num_stages=1)
del buf66
del buf67
del buf68
del buf69
del buf70
del buf71
del primals_62
del primals_64
del primals_66
del primals_68
del primals_70
del primals_72
return (buf72, buf73, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, primals_20, primals_22, primals_24, primals_26,
primals_28, primals_30, primals_32, primals_33, primals_35,
primals_37, primals_39, primals_41, primals_43, primals_45,
primals_47, primals_49, primals_51, primals_53, primals_55,
primals_57, primals_59, primals_61, primals_63, primals_65,
primals_67, primals_69, primals_71, buf1, buf3, buf4, buf5, buf7,
buf9, buf10, buf11, buf13, buf15, buf17, buf18, buf19, buf21, buf23,
buf25, buf26, buf27, buf29, buf31, buf33, buf34, buf35, buf37,
buf39, buf41, buf42, buf43, buf45, buf47, buf49, buf51, buf53,
buf55, buf57, buf59)
def decimate(tensor, m):
"""
Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.
This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.
:param tensor: tensor to be decimated
:param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
:return: decimated tensor
"""
assert tensor.dim() == len(m)
for d in range(tensor.dim()):
if m[d] is not None:
tensor = tensor.index_select(dim=d, index=torch.arange(start=0,
end=tensor.size(d), step=m[d]).long())
return tensor
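# Usage sketch (example values, mirroring load_pretrained_layers below): the
# (4096, 512, 7, 7) view of the VGG fc6 weight is decimated with
# m=[4, None, 3, 3] into a (1024, 512, 3, 3) kernel for conv6.
def _example_decimate():
    fc6_weight = torch.randn(4096, 512, 7, 7)
    return decimate(fc6_weight, m=[4, None, 3, 3])  # shape (1024, 512, 3, 3)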
def cxcy_to_xy(cxcy):
"""
Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max).
:param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)
:return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)
"""
return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:,
2:] / 2], 1)
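# Quick check (sketch, values assumed): a 0.2 x 0.2 box centered at (0.5, 0.5)
# converts to corners (0.4, 0.4, 0.6, 0.6), i.e.
# cxcy_to_xy(torch.tensor([[0.5, 0.5, 0.2, 0.2]])).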
def find_intersection(set_1, set_2):
"""
Find the intersection of every box combination between two sets of boxes that are in boundary coordinates.
:param set_1: set 1, a tensor of dimensions (n1, 4)
:param set_2: set 2, a tensor of dimensions (n2, 4)
:return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
"""
lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].
unsqueeze(0))
upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].
unsqueeze(0))
intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0)
return intersection_dims[:, :, 0] * intersection_dims[:, :, 1]
def find_jaccard_overlap(set_1, set_2):
"""
Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates.
:param set_1: set 1, a tensor of dimensions (n1, 4)
:param set_2: set 2, a tensor of dimensions (n2, 4)
:return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
"""
intersection = find_intersection(set_1, set_2)
areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1])
areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1])
union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection
return intersection / union
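# A minimal sanity check (sketch, values assumed): two unit squares offset by
# half a side intersect in a 0.5 x 0.5 patch, so IoU = 0.25 / 1.75 = 0.1429.
def _example_jaccard_overlap():
    a = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
    b = torch.tensor([[0.5, 0.5, 1.5, 1.5]])
    return find_jaccard_overlap(a, b)  # tensor([[0.1429]])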
def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):
"""
    Decode bounding box coordinates predicted by the model from their encoded offset form (see 'cxcy_to_gcxgcy' in utils.py) into center-size coordinates.
    This is the inverse of that encoding.
:param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4)
:param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4)
:return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4)
"""
return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy
[:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1)
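# Decode sanity check (sketch, values assumed): zero offsets decode back to
# the prior itself, since the center shift is 0 and exp(0 / 5) = 1.
def _example_decode():
    priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])
    return gcxgcy_to_cxcy(torch.zeros(1, 4), priors)  # equals `priors`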
class VGGBase(nn.Module):
"""
VGG base convolutions to produce lower-level feature maps.
"""
def __init__(self):
super(VGGBase, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
self.load_pretrained_layers()
def forward(self, image):
"""
Forward propagation.
:param image: images, a tensor of dimensions (N, 3, 300, 300)
:return: lower-level feature maps conv4_3 and conv7
"""
out = F.relu(self.conv1_1(image))
out = F.relu(self.conv1_2(out))
out = self.pool1(out)
out = F.relu(self.conv2_1(out))
out = F.relu(self.conv2_2(out))
out = self.pool2(out)
out = F.relu(self.conv3_1(out))
out = F.relu(self.conv3_2(out))
out = F.relu(self.conv3_3(out))
out = self.pool3(out)
out = F.relu(self.conv4_1(out))
out = F.relu(self.conv4_2(out))
out = F.relu(self.conv4_3(out))
conv4_3_feats = out
out = self.pool4(out)
out = F.relu(self.conv5_1(out))
out = F.relu(self.conv5_2(out))
out = F.relu(self.conv5_3(out))
out = self.pool5(out)
out = F.relu(self.conv6(out))
conv7_feats = F.relu(self.conv7(out))
return conv4_3_feats, conv7_feats
def load_pretrained_layers(self):
"""
As in the paper, we use a VGG-16 pretrained on the ImageNet task as the base network.
There's one available in PyTorch, see https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.vgg16
We copy these parameters into our network. It's straightforward for conv1 to conv5.
        However, the original VGG-16 does not contain the conv6 and conv7 layers.
Therefore, we convert fc6 and fc7 into convolutional layers, and subsample by decimation. See 'decimate' in utils.py.
"""
state_dict = self.state_dict()
param_names = list(state_dict.keys())
pretrained_state_dict = torchvision.models.vgg16(pretrained=True
).state_dict()
pretrained_param_names = list(pretrained_state_dict.keys())
for i, param in enumerate(param_names[:-4]):
state_dict[param] = pretrained_state_dict[pretrained_param_names[i]
]
conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(
4096, 512, 7, 7)
conv_fc6_bias = pretrained_state_dict['classifier.0.bias']
state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None,
3, 3])
state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4])
conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(
4096, 4096, 1, 1)
conv_fc7_bias = pretrained_state_dict['classifier.3.bias']
state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4,
None, None])
state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4])
self.load_state_dict(state_dict)
class AuxiliaryConvolutions(nn.Module):
"""
Additional convolutions to produce higher-level feature maps.
"""
def __init__(self):
super(AuxiliaryConvolutions, self).__init__()
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1, padding=0)
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1, padding=0)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1, padding=0)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3, padding=0)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv7_feats):
"""
Forward propagation.
:param conv7_feats: lower-level conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:return: higher-level feature maps conv8_2, conv9_2, conv10_2, and conv11_2
"""
out = F.relu(self.conv8_1(conv7_feats))
out = F.relu(self.conv8_2(out))
conv8_2_feats = out
out = F.relu(self.conv9_1(out))
out = F.relu(self.conv9_2(out))
conv9_2_feats = out
out = F.relu(self.conv10_1(out))
out = F.relu(self.conv10_2(out))
conv10_2_feats = out
out = F.relu(self.conv11_1(out))
conv11_2_feats = F.relu(self.conv11_2(out))
return conv8_2_feats, conv9_2_feats, conv10_2_feats, conv11_2_feats
class PredictionConvolutions(nn.Module):
"""
Convolutions to predict class scores and bounding boxes using lower and higher-level feature maps.
The bounding boxes (locations) are predicted as encoded offsets w.r.t each of the 8732 prior (default) boxes.
See 'cxcy_to_gcxgcy' in utils.py for the encoding definition.
The class scores represent the scores of each object class in each of the 8732 bounding boxes located.
A high score for 'background' = no object.
"""
def __init__(self, n_classes):
"""
:param n_classes: number of different types of objects
"""
super(PredictionConvolutions, self).__init__()
self.n_classes = n_classes
n_boxes = {'conv4_3': 4, 'conv7': 6, 'conv8_2': 6, 'conv9_2': 6,
'conv10_2': 4, 'conv11_2': 4}
self.loc_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * 4,
kernel_size=3, padding=1)
self.loc_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * 4, kernel_size=
3, padding=1)
self.loc_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * 4,
kernel_size=3, padding=1)
self.loc_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * 4,
kernel_size=3, padding=1)
self.cl_conv4_3 = nn.Conv2d(512, n_boxes['conv4_3'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv7 = nn.Conv2d(1024, n_boxes['conv7'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv8_2 = nn.Conv2d(512, n_boxes['conv8_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv9_2 = nn.Conv2d(256, n_boxes['conv9_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv10_2 = nn.Conv2d(256, n_boxes['conv10_2'] * n_classes,
kernel_size=3, padding=1)
self.cl_conv11_2 = nn.Conv2d(256, n_boxes['conv11_2'] * n_classes,
kernel_size=3, padding=1)
self.init_conv2d()
def init_conv2d(self):
"""
Initialize convolution parameters.
"""
for c in self.children():
if isinstance(c, nn.Conv2d):
nn.init.xavier_uniform_(c.weight)
nn.init.constant_(c.bias, 0.0)
def forward(self, conv4_3_feats, conv7_feats, conv8_2_feats,
conv9_2_feats, conv10_2_feats, conv11_2_feats):
"""
Forward propagation.
:param conv4_3_feats: conv4_3 feature map, a tensor of dimensions (N, 512, 38, 38)
:param conv7_feats: conv7 feature map, a tensor of dimensions (N, 1024, 19, 19)
:param conv8_2_feats: conv8_2 feature map, a tensor of dimensions (N, 512, 10, 10)
:param conv9_2_feats: conv9_2 feature map, a tensor of dimensions (N, 256, 5, 5)
:param conv10_2_feats: conv10_2 feature map, a tensor of dimensions (N, 256, 3, 3)
:param conv11_2_feats: conv11_2 feature map, a tensor of dimensions (N, 256, 1, 1)
:return: 8732 locations and class scores (i.e. w.r.t each prior box) for each image
"""
batch_size = conv4_3_feats.size(0)
l_conv4_3 = self.loc_conv4_3(conv4_3_feats)
l_conv4_3 = l_conv4_3.permute(0, 2, 3, 1).contiguous()
l_conv4_3 = l_conv4_3.view(batch_size, -1, 4)
l_conv7 = self.loc_conv7(conv7_feats)
l_conv7 = l_conv7.permute(0, 2, 3, 1).contiguous()
l_conv7 = l_conv7.view(batch_size, -1, 4)
l_conv8_2 = self.loc_conv8_2(conv8_2_feats)
l_conv8_2 = l_conv8_2.permute(0, 2, 3, 1).contiguous()
l_conv8_2 = l_conv8_2.view(batch_size, -1, 4)
l_conv9_2 = self.loc_conv9_2(conv9_2_feats)
l_conv9_2 = l_conv9_2.permute(0, 2, 3, 1).contiguous()
l_conv9_2 = l_conv9_2.view(batch_size, -1, 4)
l_conv10_2 = self.loc_conv10_2(conv10_2_feats)
l_conv10_2 = l_conv10_2.permute(0, 2, 3, 1).contiguous()
l_conv10_2 = l_conv10_2.view(batch_size, -1, 4)
l_conv11_2 = self.loc_conv11_2(conv11_2_feats)
l_conv11_2 = l_conv11_2.permute(0, 2, 3, 1).contiguous()
l_conv11_2 = l_conv11_2.view(batch_size, -1, 4)
c_conv4_3 = self.cl_conv4_3(conv4_3_feats)
c_conv4_3 = c_conv4_3.permute(0, 2, 3, 1).contiguous()
c_conv4_3 = c_conv4_3.view(batch_size, -1, self.n_classes)
c_conv7 = self.cl_conv7(conv7_feats)
c_conv7 = c_conv7.permute(0, 2, 3, 1).contiguous()
c_conv7 = c_conv7.view(batch_size, -1, self.n_classes)
c_conv8_2 = self.cl_conv8_2(conv8_2_feats)
c_conv8_2 = c_conv8_2.permute(0, 2, 3, 1).contiguous()
c_conv8_2 = c_conv8_2.view(batch_size, -1, self.n_classes)
c_conv9_2 = self.cl_conv9_2(conv9_2_feats)
c_conv9_2 = c_conv9_2.permute(0, 2, 3, 1).contiguous()
c_conv9_2 = c_conv9_2.view(batch_size, -1, self.n_classes)
c_conv10_2 = self.cl_conv10_2(conv10_2_feats)
c_conv10_2 = c_conv10_2.permute(0, 2, 3, 1).contiguous()
c_conv10_2 = c_conv10_2.view(batch_size, -1, self.n_classes)
c_conv11_2 = self.cl_conv11_2(conv11_2_feats)
c_conv11_2 = c_conv11_2.permute(0, 2, 3, 1).contiguous()
c_conv11_2 = c_conv11_2.view(batch_size, -1, self.n_classes)
locs = torch.cat([l_conv4_3, l_conv7, l_conv8_2, l_conv9_2,
l_conv10_2, l_conv11_2], dim=1)
classes_scores = torch.cat([c_conv4_3, c_conv7, c_conv8_2,
c_conv9_2, c_conv10_2, c_conv11_2], dim=1)
return locs, classes_scores
class SSD300New(nn.Module):
"""
The SSD300 network - encapsulates the base VGG network, auxiliary, and prediction convolutions.
"""
def __init__(self, n_classes):
super(SSD300New, self).__init__()
self.n_classes = n_classes
self.base = VGGBase()
self.aux_convs = AuxiliaryConvolutions()
self.pred_convs = PredictionConvolutions(n_classes)
self.rescale_factors = nn.Parameter(torch.FloatTensor(1, 512, 1, 1))
nn.init.constant_(self.rescale_factors, 20)
self.priors_cxcy = self.create_prior_boxes()
def create_prior_boxes(self):
"""
Create the 8732 prior (default) boxes for the SSD300, as defined in the paper.
:return: prior boxes in center-size coordinates, a tensor of dimensions (8732, 4)
"""
fmap_dims = {'conv4_3': 38, 'conv7': 19, 'conv8_2': 10, 'conv9_2':
5, 'conv10_2': 3, 'conv11_2': 1}
obj_scales = {'conv4_3': 0.1, 'conv7': 0.2, 'conv8_2': 0.375,
'conv9_2': 0.55, 'conv10_2': 0.725, 'conv11_2': 0.9}
aspect_ratios = {'conv4_3': [1.0, 2.0, 0.5], 'conv7': [1.0, 2.0,
3.0, 0.5, 0.333], 'conv8_2': [1.0, 2.0, 3.0, 0.5, 0.333],
'conv9_2': [1.0, 2.0, 3.0, 0.5, 0.333], 'conv10_2': [1.0, 2.0,
0.5], 'conv11_2': [1.0, 2.0, 0.5]}
fmaps = list(fmap_dims.keys())
prior_boxes = []
for k, fmap in enumerate(fmaps):
for i in range(fmap_dims[fmap]):
for j in range(fmap_dims[fmap]):
cx = (j + 0.5) / fmap_dims[fmap]
cy = (i + 0.5) / fmap_dims[fmap]
for ratio in aspect_ratios[fmap]:
prior_boxes.append([cx, cy, obj_scales[fmap] * sqrt
(ratio), obj_scales[fmap] / sqrt(ratio)])
if ratio == 1.0:
try:
additional_scale = sqrt(obj_scales[fmap] *
obj_scales[fmaps[k + 1]])
except IndexError:
additional_scale = 1.0
prior_boxes.append([cx, cy, additional_scale,
additional_scale])
prior_boxes = torch.FloatTensor(prior_boxes)
prior_boxes.clamp_(0, 1)
return prior_boxes
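    # Count check (sketch, from the tables above): the configuration yields
    # 38^2*4 + 19^2*6 + 10^2*6 + 5^2*6 + 3^2*4 + 1^2*4 = 8732 priors, matching
    # the docstring; the extra box per location comes from the
    # additional_scale prior appended when ratio == 1.0.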
def detect_objects(self, predicted_locs, predicted_scores, min_score,
max_overlap, top_k):
"""
        Decipher the 8732 locations and class scores (output of the SSD300) to detect objects.
For each class, perform Non-Maximum Suppression (NMS) on boxes that are above a minimum threshold.
:param predicted_locs: predicted locations/boxes w.r.t the 8732 prior boxes, a tensor of dimensions (N, 8732, 4)
:param predicted_scores: class scores for each of the encoded locations/boxes, a tensor of dimensions (N, 8732, n_classes)
:param min_score: minimum threshold for a box to be considered a match for a certain class
:param max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via NMS
        :param top_k: if there are many resulting detections across all classes, keep only the top 'k'
:return: detections (boxes, labels, and scores), lists of length batch_size
"""
batch_size = predicted_locs.size(0)
n_priors = self.priors_cxcy.size(0)
predicted_scores = F.softmax(predicted_scores, dim=2)
all_images_boxes = list()
all_images_labels = list()
all_images_scores = list()
assert n_priors == predicted_locs.size(1) == predicted_scores.size(1)
for i in range(batch_size):
decoded_locs = cxcy_to_xy(gcxgcy_to_cxcy(predicted_locs[i],
self.priors_cxcy))
image_boxes = list()
image_labels = list()
image_scores = list()
_max_scores, _best_label = predicted_scores[i].max(dim=1)
for c in range(1, self.n_classes):
class_scores = predicted_scores[i][:, c]
score_above_min_score = class_scores > min_score
n_above_min_score = score_above_min_score.sum().item()
if n_above_min_score == 0:
continue
class_scores = class_scores[score_above_min_score]
class_decoded_locs = decoded_locs[score_above_min_score]
class_scores, sort_ind = class_scores.sort(dim=0,
descending=True)
class_decoded_locs = class_decoded_locs[sort_ind]
overlap = find_jaccard_overlap(class_decoded_locs,
class_decoded_locs)
suppress = torch.zeros(n_above_min_score, dtype=torch.bool)
for box in range(class_decoded_locs.size(0)):
if suppress[box] == 1:
continue
suppress = suppress | (overlap[box] > max_overlap)
suppress[box] = 0
image_boxes.append(class_decoded_locs[~suppress])
image_labels.append(torch.LongTensor((~suppress).sum().item
() * [c]))
image_scores.append(class_scores[~suppress])
if len(image_boxes) == 0:
image_boxes.append(torch.FloatTensor([[0.0, 0.0, 1.0, 1.0]]))
image_labels.append(torch.LongTensor([0]))
image_scores.append(torch.FloatTensor([0.0]))
image_boxes = torch.cat(image_boxes, dim=0)
image_labels = torch.cat(image_labels, dim=0)
image_scores = torch.cat(image_scores, dim=0)
n_objects = image_scores.size(0)
if n_objects > top_k:
image_scores, sort_ind = image_scores.sort(dim=0,
descending=True)
image_scores = image_scores[:top_k]
image_boxes = image_boxes[sort_ind][:top_k]
image_labels = image_labels[sort_ind][:top_k]
all_images_boxes.append(image_boxes)
all_images_labels.append(image_labels)
all_images_scores.append(image_scores)
return all_images_boxes, all_images_labels, all_images_scores
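    # Typical invocation (sketch; the thresholds are common SSD evaluation
    # choices, not values taken from this file):
    # boxes, labels, scores = model.detect_objects(predicted_locs,
    #     predicted_scores, min_score=0.2, max_overlap=0.45, top_k=200)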
def forward(self, input_0):
primals_32 = self.rescale_factors
primals_1 = self.base.conv1_1.weight
primals_2 = self.base.conv1_1.bias
primals_4 = self.base.conv1_2.weight
primals_5 = self.base.conv1_2.bias
primals_6 = self.base.conv2_1.weight
primals_7 = self.base.conv2_1.bias
primals_8 = self.base.conv2_2.weight
primals_9 = self.base.conv2_2.bias
primals_10 = self.base.conv3_1.weight
primals_11 = self.base.conv3_1.bias
primals_12 = self.base.conv3_2.weight
primals_13 = self.base.conv3_2.bias
primals_14 = self.base.conv3_3.weight
primals_15 = self.base.conv3_3.bias
primals_16 = self.base.conv4_1.weight
primals_17 = self.base.conv4_1.bias
primals_18 = self.base.conv4_2.weight
primals_19 = self.base.conv4_2.bias
primals_20 = self.base.conv4_3.weight
primals_21 = self.base.conv4_3.bias
primals_22 = self.base.conv5_1.weight
primals_23 = self.base.conv5_1.bias
primals_24 = self.base.conv5_2.weight
primals_25 = self.base.conv5_2.bias
primals_26 = self.base.conv5_3.weight
primals_27 = self.base.conv5_3.bias
primals_28 = self.base.conv6.weight
primals_29 = self.base.conv6.bias
primals_30 = self.base.conv7.weight
primals_31 = self.base.conv7.bias
primals_33 = self.aux_convs.conv8_1.weight
primals_34 = self.aux_convs.conv8_1.bias
primals_35 = self.aux_convs.conv8_2.weight
primals_36 = self.aux_convs.conv8_2.bias
primals_37 = self.aux_convs.conv9_1.weight
primals_38 = self.aux_convs.conv9_1.bias
primals_39 = self.aux_convs.conv9_2.weight
primals_40 = self.aux_convs.conv9_2.bias
primals_41 = self.aux_convs.conv10_1.weight
primals_42 = self.aux_convs.conv10_1.bias
primals_43 = self.aux_convs.conv10_2.weight
primals_44 = self.aux_convs.conv10_2.bias
primals_45 = self.aux_convs.conv11_1.weight
primals_46 = self.aux_convs.conv11_1.bias
primals_47 = self.aux_convs.conv11_2.weight
primals_48 = self.aux_convs.conv11_2.bias
primals_49 = self.pred_convs.loc_conv4_3.weight
primals_50 = self.pred_convs.loc_conv4_3.bias
primals_51 = self.pred_convs.loc_conv7.weight
primals_52 = self.pred_convs.loc_conv7.bias
primals_53 = self.pred_convs.loc_conv8_2.weight
primals_54 = self.pred_convs.loc_conv8_2.bias
primals_55 = self.pred_convs.loc_conv9_2.weight
primals_56 = self.pred_convs.loc_conv9_2.bias
primals_57 = self.pred_convs.loc_conv10_2.weight
primals_58 = self.pred_convs.loc_conv10_2.bias
primals_59 = self.pred_convs.loc_conv11_2.weight
primals_60 = self.pred_convs.loc_conv11_2.bias
primals_61 = self.pred_convs.cl_conv4_3.weight
primals_62 = self.pred_convs.cl_conv4_3.bias
primals_63 = self.pred_convs.cl_conv7.weight
primals_64 = self.pred_convs.cl_conv7.bias
primals_65 = self.pred_convs.cl_conv8_2.weight
primals_66 = self.pred_convs.cl_conv8_2.bias
primals_67 = self.pred_convs.cl_conv9_2.weight
primals_68 = self.pred_convs.cl_conv9_2.bias
primals_69 = self.pred_convs.cl_conv10_2.weight
primals_70 = self.pred_convs.cl_conv10_2.bias
primals_71 = self.pred_convs.cl_conv11_2.weight
primals_72 = self.pred_convs.cl_conv11_2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72])
return output[0], output[1]
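# Usage sketch (assumed, not part of the generated file): the compiled module
# keeps the original forward signature, returning (locs, class_scores); the
# traced graph above was specialized to a (4, 3, 512, 512) input and
# n_classes=4 (the conv4_3 class head has 16 = 4 boxes * 4 classes channels).
# model = SSD300New(n_classes=4).cuda()
# locs, class_scores = model(torch.randn(4, 3, 512, 512, device='cuda'))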
| dee-walia20/SSD-Implementation-using-Pytorch | SSD300 | false | 7,606 | ["MIT"] | 1 | 2a7dcdcea2787f4bffd45f335819f08af2b525dd | https://github.com/dee-walia20/SSD-Implementation-using-Pytorch/tree/2a7dcdcea2787f4bffd45f335819f08af2b525dd |
import torch
import torchvision
from torch import nn
import torch.nn.functional as F
from math import sqrt
from itertools import product
import torch.optim
import torch.utils.data
def decimate(tensor, m):
"""
Decimate a tensor by a factor 'm', i.e. downsample by keeping every 'm'th value.
This is used when we convert FC layers to equivalent Convolutional layers, BUT of a smaller size.
:param tensor: tensor to be decimated
:param m: list of decimation factors for each dimension of the tensor; None if not to be decimated along a dimension
:return: decimated tensor
"""
assert tensor.dim() == len(m)
for d in range(tensor.dim()):
if m[d] is not None:
tensor = tensor.index_select(dim=d, index=torch.arange(start=0,
end=tensor.size(d), step=m[d]).long())
return tensor
def cxcy_to_xy(cxcy):
"""
Convert bounding boxes from center-size coordinates (c_x, c_y, w, h) to boundary coordinates (x_min, y_min, x_max, y_max).
:param cxcy: bounding boxes in center-size coordinates, a tensor of size (n_boxes, 4)
:return: bounding boxes in boundary coordinates, a tensor of size (n_boxes, 4)
"""
return torch.cat([cxcy[:, :2] - cxcy[:, 2:] / 2, cxcy[:, :2] + cxcy[:,
2:] / 2], 1)
def find_intersection(set_1, set_2):
"""
Find the intersection of every box combination between two sets of boxes that are in boundary coordinates.
:param set_1: set 1, a tensor of dimensions (n1, 4)
:param set_2: set 2, a tensor of dimensions (n2, 4)
:return: intersection of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
"""
lower_bounds = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].
unsqueeze(0))
upper_bounds = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].
unsqueeze(0))
intersection_dims = torch.clamp(upper_bounds - lower_bounds, min=0)
return intersection_dims[:, :, 0] * intersection_dims[:, :, 1]
def find_jaccard_overlap(set_1, set_2):
"""
Find the Jaccard Overlap (IoU) of every box combination between two sets of boxes that are in boundary coordinates.
:param set_1: set 1, a tensor of dimensions (n1, 4)
:param set_2: set 2, a tensor of dimensions (n2, 4)
:return: Jaccard Overlap of each of the boxes in set 1 with respect to each of the boxes in set 2, a tensor of dimensions (n1, n2)
"""
intersection = find_intersection(set_1, set_2)
areas_set_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1])
areas_set_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1])
union = areas_set_1.unsqueeze(1) + areas_set_2.unsqueeze(0) - intersection
return intersection / union
def gcxgcy_to_cxcy(gcxgcy, priors_cxcy):
"""
    Decode bounding box coordinates predicted by the model from their encoded offset form (see 'cxcy_to_gcxgcy' in utils.py) into center-size coordinates.
    This is the inverse of that encoding.
:param gcxgcy: encoded bounding boxes, i.e. output of the model, a tensor of size (n_priors, 4)
:param priors_cxcy: prior boxes with respect to which the encoding is defined, a tensor of size (n_priors, 4)
:return: decoded bounding boxes in center-size form, a tensor of size (n_priors, 4)
"""
return torch.cat([gcxgcy[:, :2] * priors_cxcy[:, 2:] / 10 + priors_cxcy
[:, :2], torch.exp(gcxgcy[:, 2:] / 5) * priors_cxcy[:, 2:]], 1)
class VGGBase(nn.Module):
"""
VGG base convolutions to produce lower-level feature maps.
"""
def __init__(self):
super().__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.poo
# ... truncated (>4000 chars) for memory efficiency |
ResidualBlock
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zj/czjbomcj7alq7d2rpyq5jovfb4r4f7jmlbscailane6tzp5bclsn.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten._native_batch_norm_legit, aten.elu]
# Source node to ATen node mapping:
# output => var_mean
# output_1 => expm1, gt, mul_1, mul_3, where
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul_1, %mul_3), kwargs = {})
triton_per_fused__native_batch_norm_legit_elu_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_elu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_elu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_elu_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp24 = 0.0
tmp25 = tmp23 > tmp24
tmp26 = 1.0
tmp27 = tmp23 * tmp26
tmp28 = libdevice.expm1(tmp27)
tmp29 = tmp28 * tmp26
tmp30 = tl.where(tmp25, tmp27, tmp29)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp30, xmask)
''', device_str='cuda')
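# Reference sketch (ours, not generated code): an eager-mode equivalent of the
# fused kernel above for the fixed (4, 4, 4, 4) input this module was compiled
# for. The helper name `_ref_instancenorm_elu` is hypothetical.
def _ref_instancenorm_elu(x):
    import torch
    import torch.nn.functional as F
    # Biased per-(batch, channel) statistics over the 4x4 spatial extent,
    # matching var_mean(..., correction=0) in the graph fragment above.
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    # eps (1e-05) and ELU alpha (1.0) mirror the constants baked into the kernel.
    return F.elu((x - mean) * torch.rsqrt(var + 1e-05))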
# kernel path: runs/run_shard_4/inductor_cache/at/catgmbzwhox3nc2xeo6zs6mzasfybxlwxttgaih7j6pjgsp6x4l5.py
# Topologically Sorted Source Nodes: [output_2, output_3, output_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.elu]
# Source node to ATen node mapping:
# output_2 => convolution
# output_3 => add_1, rsqrt_1, var_mean_1
# output_4 => expm1_1, gt_1, mul_5, mul_7, where_1
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 1.0), kwargs = {})
# %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_5,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_5, %mul_7), kwargs = {})
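# Same normalize+ELU pattern as kernel 0, with two additions: the convolution
# bias is folded in first (tmp0 + tmp1 below), and the per-group mean and
# rsqrt are written out separately (out_ptr0, in_out_ptr1) because the
# backward pass of the compiled graph needs those saved statistics.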
triton_per_fused__native_batch_norm_legit_convolution_elu_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_elu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_elu_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_elu_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 1.0
tmp29 = tmp25 * tmp28
tmp30 = libdevice.expm1(tmp29)
tmp31 = tmp30 * tmp28
tmp32 = tl.where(tmp27, tmp29, tmp31)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr1 + (r2 + (16*x3)), tmp32, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rb/crbiu4u5ytzqglii4fqbg7tcmfshrvsdjcxspfuroyibunmibr5i.py
# Topologically Sorted Source Nodes: [output_5, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add_2
# output_5 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
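# This last kernel fuses the second convolution's bias add with the residual
# skip connection: tmp4 = tmp0 + (tmp1 + tmp2) above is x + (conv2(h) + bias),
# i.e. the `shortcut + output` of ResidualBlock.forward in the
# identity-shortcut case (input_dim == output_dim, resample is None).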
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten._native_batch_norm_legit, aten.elu]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_elu_0.run(primals_1, buf3, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf9 = reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf7 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_2, output_3, output_4], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.elu]
triton_per_fused__native_batch_norm_legit_convolution_elu_1.run(buf5, buf9, primals_3, buf6, buf10, 16, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [output_5], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [output_5, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf12, primals_1, primals_5, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf12, primals_2, primals_4, buf3, buf5, buf6, buf9, buf10, )
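# Note on the return tuple: buf12 (the block output) comes first; the weights
# and intermediates that follow are kept alive as saved activations for the
# compiled backward of this forward graph.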
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from functools import partial
def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1,
init_scale=1.0, padding=1):
"""3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2."""
init_scale = 1e-10 if init_scale == 0 else init_scale
conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias,
dilation=dilation, padding=padding, kernel_size=3)
conv.weight.data *= init_scale
if bias:
conv.bias.data *= init_scale
return conv
def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1,
init_scale=1.0, padding=0):
"""1x1 convolution. Same as NCSNv1/v2."""
conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=bias, dilation=dilation, padding=padding)
init_scale = 1e-10 if init_scale == 0 else init_scale
conv.weight.data *= init_scale
conv.bias.data *= init_scale
return conv
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return output
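        # The four-phase average above equals a stride-2 average pool applied
        # after the convolution; a quick check (ours, assumes even H and W):
        #   torch.allclose(output, torch.nn.functional.avg_pool2d(self.conv(inputs), 2))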
class ResidualBlock(nn.Module):
def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(),
normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1):
super().__init__()
self.non_linearity = act
self.input_dim = input_dim
self.output_dim = output_dim
self.resample = resample
self.normalization = normalization
if resample == 'down':
if dilation > 1:
self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=
dilation)
self.normalize2 = normalization(input_dim)
self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=
dilation)
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
else:
self.conv1 = ncsn_conv3x3(input_dim, input_dim)
self.normalize2 = normalization(input_dim)
self.conv2 = ConvMeanPool(input_dim, output_dim, 3,
adjust_padding=adjust_padding)
conv_shortcut = partial(ConvMeanPool, kernel_size=1,
adjust_padding=adjust_padding)
elif resample is None:
if dilation > 1:
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=
dilation)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=
dilation)
else:
conv_shortcut = partial(ncsn_conv1x1)
self.conv1 = ncsn_conv3x3(input_dim, output_dim)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim)
else:
raise Exception('invalid resample value')
if output_dim != input_dim or resample is not None:
self.shortcut = conv_shortcut(input_dim, output_dim)
self.normalize1 = normalization(input_dim)
def forward(self, x):
output = self.normalize1(x)
output = self.non_linearity(output)
output = self.conv1(output)
output = self.normalize2(output)
output = self.non_linearity(output)
output = self.conv2(output)
if self.output_dim == self.input_dim and self.resample is None:
shortcut = x
else:
shortcut = self.shortcut(x)
return shortcut + output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
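# Usage sketch (ours, not part of the original file): with the defaults
# (resample=None, matching dims) the block is a pre-activation residual block
# with an identity shortcut, so the input shape is preserved:
#   block = ResidualBlock(input_dim=4, output_dim=4)
#   y = block(torch.rand(4, 4, 4, 4))   # y.shape == torch.Size([4, 4, 4, 4])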
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from functools import partial
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_elu_0(in_ptr0, out_ptr2,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp24 = 0.0
tmp25 = tmp23 > tmp24
tmp26 = 1.0
tmp27 = tmp23 * tmp26
tmp28 = libdevice.expm1(tmp27)
tmp29 = tmp28 * tmp26
tmp30 = tl.where(tmp25, tmp27, tmp29)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp30, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_elu_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp2 - tmp12
tmp25 = tmp24 * tmp23
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 1.0
tmp29 = tmp25 * tmp28
tmp30 = libdevice.expm1(tmp29)
tmp31 = tmp30 * tmp28
tmp32 = tl.where(tmp27, tmp29, tmp31)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr1 + (r2 + 16 * x3), tmp32, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_elu_0[grid(16)](primals_1,
buf3, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf7 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf9 = reinterpret_tensor(buf7, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf7
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_elu_1[grid(16)](
buf5, buf9, primals_3, buf6, buf10, 16, 16, XBLOCK=1, num_warps
=2, num_stages=1)
del primals_3
buf11 = extern_kernels.convolution(buf10, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = buf11
del buf11
triton_poi_fused_add_convolution_2[grid(256)](buf12, primals_1,
primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf12, primals_2, primals_4, buf3, buf5, buf6, buf9, buf10
def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1,
init_scale=1.0, padding=1):
"""3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2."""
init_scale = 1e-10 if init_scale == 0 else init_scale
conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias,
dilation=dilation, padding=padding, kernel_size=3)
conv.weight.data *= init_scale
if bias:
conv.bias.data *= init_scale
return conv
def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1,
init_scale=1.0, padding=0):
"""1x1 convolution. Same as NCSNv1/v2."""
conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=bias, dilation=dilation, padding=padding)
init_scale = 1e-10 if init_scale == 0 else init_scale
conv.weight.data *= init_scale
conv.bias.data *= init_scale
return conv
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return output
class ResidualBlockNew(nn.Module):
def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(),
normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1):
super().__init__()
self.non_linearity = act
self.input_dim = input_dim
self.output_dim = output_dim
self.resample = resample
self.normalization = normalization
if resample == 'down':
if dilation > 1:
self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=
dilation)
self.normalize2 = normalization(input_dim)
self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=
dilation)
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
else:
self.conv1 = ncsn_conv3x3(input_dim, input_dim)
self.normalize2 = normalization(input_dim)
self.conv2 = ConvMeanPool(input_dim, output_dim, 3,
adjust_padding=adjust_padding)
conv_shortcut = partial(ConvMeanPool, kernel_size=1,
adjust_padding=adjust_padding)
elif resample is None:
if dilation > 1:
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=
dilation)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=
dilation)
else:
conv_shortcut = partial(ncsn_conv1x1)
self.conv1 = ncsn_conv3x3(input_dim, output_dim)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim)
else:
raise Exception('invalid resample value')
if output_dim != input_dim or resample is not None:
self.shortcut = conv_shortcut(input_dim, output_dim)
self.normalize1 = normalization(input_dim)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| samsartor/score_sde | ResidualBlock | false | 7,607 | [
"Apache-2.0"
] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | import torch
import torch.nn as nn
from functools import partial
def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1,
init_scale=1.0, padding=1):
"""3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2."""
init_scale = 1e-10 if init_scale == 0 else init_scale
conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias,
dilation=dilation, padding=padding, kernel_size=3)
conv.weight.data *= init_scale
if bias:
conv.bias.data *= init_scale
return conv
def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1,
init_scale=1.0, padding=0):
"""1x1 convolution. Same as NCSNv1/v2."""
conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=bias, dilation=dilation, padding=padding)
init_scale = 1e-10 if init_scale == 0 else init_scale
conv.weight.data *= init_scale
conv.bias.data *= init_scale
return conv
class ConvMeanPool(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size=3, biases=True,
adjust_padding=False):
super().__init__()
if not adjust_padding:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = conv
else:
conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1,
padding=kernel_size // 2, bias=biases)
self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv)
def forward(self, inputs):
output = self.conv(inputs)
output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2],
output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4.0
return output
class Model(nn.Module):
def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(),
normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1):
super().__init__()
self.non_linearity = act
self.input_dim = input_dim
self.output_dim = output_dim
self.resample = resample
self.normalization = normalization
if resample == 'down':
if dilation > 1:
self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=
dilation)
self.normalize2 = normalization(input_dim)
self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=
dilation)
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
else:
self.conv1 = ncsn_conv3x3(input_dim, input_dim)
self.normalize2 = normalization(input_dim)
self.conv2 = ConvMeanPool(input_dim, output_dim, 3,
adjust_padding=adjust_padding)
conv_shortcut = partial(ConvMeanPool, kernel_size=1,
adjust_padding=adjust_padding)
elif resample is None:
if dilation > 1:
conv_shortcut = partial(ncsn_conv3x3, dilation=dilation)
self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=
dilation)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=
dilation)
else:
conv_shortcut = partial(ncsn_conv1x1)
self.conv1 = ncsn_conv3x3(input_dim, output_dim)
self.normalize2 = normalization(output_dim)
self.conv2 = ncsn_conv3x3(output_dim, output_dim)
else:
raise Exception('invalid resample value')
if output_dim != input_dim or resample is not None:
self.shortcut = conv_shortcut(input_dim, output_dim)
self.normalize1 = normalization(input_dim)
def forward(self, x):
output = self.normalize1(x)
output = self.non_linearity(output)
output = self.conv1(output)
output
# ... truncated (>4000 chars) for memory efficiency |
LinActorCritic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/nc/cncwsucylpsg2zmlivjfxu6vbd64ztxjndlsix2ysjtby3xohgk4.py
# Topologically Sorted Source Nodes: [p_1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# p_1 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
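# The matmul itself runs through extern_kernels.mm; this epilogue kernel only
# applies tanh(x + bias) in place, with the bias indexed by x0 (the feature
# dim, broadcast across all 64 rows of the flattened (64, 4) activation).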
# kernel path: runs/run_shard_4/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pi => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pi => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
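# Kernels 1 and 2 together form the numerically stable softmax over the last
# dim (size 4): pass 1 computes e_i = exp(x_i - max_j x_j), pass 2 divides by
# sum_j e_j. Subtracting the row max prevents exp() overflow without changing
# the result, since exp(x_i - m) / sum_j exp(x_j - m) == exp(x_i) / sum_j exp(x_j).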
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (1, 4), (4, 1))
assert_size_stride(primals_13, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [p_1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [p_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [p_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf7)
del primals_8
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [v_1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf8, primals_9, 256, grid=grid(256), stream=stream0)
del primals_9
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [v_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf10, primals_11, 256, grid=grid(256), stream=stream0)
del primals_11
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [v_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_13
return (buf6, reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf6, buf8, buf10, primals_12, primals_10, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class LinActorCritic(torch.nn.Module):
def __init__(self, actor_lr, epsilon, in_dim, h_dim, out_dim):
super(LinActorCritic, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.h_dim = h_dim
self.epsilon = epsilon
self.define_network()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu:0')
        self.to(self.device)
def normalize(self, tensor):
return (tensor - tensor.mean()) / (torch.std(tensor) + 1e-05)
def define_network(self):
self.relu = torch.nn.LeakyReLU()
self.leaky_relu = torch.nn.LeakyReLU()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=-1)
size = self.h_dim
self.p1 = torch.nn.Linear(self.in_dim, size)
self.p2 = torch.nn.Linear(size, size)
self.v1 = torch.nn.Linear(self.in_dim, size)
self.v2 = torch.nn.Linear(size, size)
self.pi = torch.nn.Linear(size, self.out_dim)
self.value = torch.nn.Linear(size, 1)
self.critic_loss = torch.nn.MSELoss()
def forward(self, x):
out = torch.Tensor(x).float()
p = self.p1(out)
p = self.tanh(p)
p = self.p2(p)
p = self.tanh(p)
p = self.pi(p)
pi = self.softmax(p)
v = self.v1(out)
v = self.tanh(v)
v = self.v2(v)
v = self.tanh(v)
v = self.value(v)
return pi, v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'actor_lr': 4, 'epsilon': 4, 'in_dim': 4, 'h_dim': 4,
'out_dim': 4}]
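# Usage sketch (ours): the module returns a softmax policy and a scalar value
# estimate for the same observation batch:
#   net = LinActorCritic(actor_lr=4, epsilon=4, in_dim=4, h_dim=4, out_dim=4)
#   pi, v = net(torch.rand(4, 4, 4, 4))   # pi sums to 1 along dim=-1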
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (1, 4), (4, 1))
assert_size_stride(primals_13, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(256)](buf1, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_0[grid(256)](buf3, primals_5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (64, 4), (4, 1), 0)
del buf5
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf7)
del primals_8
buf8 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_tanh_0[grid(256)](buf8, primals_9, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_9
buf9 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf9)
buf10 = reinterpret_tensor(buf9, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf9
triton_poi_fused_tanh_0[grid(256)](buf10, primals_11, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_11
buf12 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf10, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_12, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_13
return (buf6, reinterpret_tensor(buf12, (4, 4, 4, 1), (16, 4, 1, 1), 0),
reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), buf1, buf3, buf6,
buf8, buf10, primals_12, primals_10, primals_6, primals_4)
class LinActorCriticNew(torch.nn.Module):
def __init__(self, actor_lr, epsilon, in_dim, h_dim, out_dim):
super(LinActorCriticNew, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.h_dim = h_dim
self.epsilon = epsilon
self.define_network()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu:0')
        self.to(self.device)
def normalize(self, tensor):
return (tensor - tensor.mean()) / (torch.std(tensor) + 1e-05)
def define_network(self):
self.relu = torch.nn.LeakyReLU()
self.leaky_relu = torch.nn.LeakyReLU()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=-1)
size = self.h_dim
self.p1 = torch.nn.Linear(self.in_dim, size)
self.p2 = torch.nn.Linear(size, size)
self.v1 = torch.nn.Linear(self.in_dim, size)
self.v2 = torch.nn.Linear(size, size)
self.pi = torch.nn.Linear(size, self.out_dim)
self.value = torch.nn.Linear(size, 1)
self.critic_loss = torch.nn.MSELoss()
def forward(self, input_0):
primals_2 = self.p1.weight
primals_3 = self.p1.bias
primals_4 = self.p2.weight
primals_5 = self.p2.bias
primals_6 = self.v1.weight
primals_7 = self.v1.bias
primals_8 = self.v2.weight
primals_9 = self.v2.bias
primals_10 = self.pi.weight
primals_11 = self.pi.bias
primals_12 = self.value.weight
primals_13 = self.value.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
| Gregory-Eales/mban | LinActorCritic | false | 7,608 | [
"Apache-2.0"
] | 1 | d8b35db51c7e601b1db777d9a80343600374250b | https://github.com/Gregory-Eales/mban/tree/d8b35db51c7e601b1db777d9a80343600374250b | import torch
class Model(torch.nn.Module):
def __init__(self, actor_lr, epsilon, in_dim, h_dim, out_dim):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.h_dim = h_dim
self.epsilon = epsilon
self.define_network()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu:0')
        self.to(self.device)
def normalize(self, tensor):
return (tensor - tensor.mean()) / (torch.std(tensor) + 1e-05)
def define_network(self):
self.relu = torch.nn.LeakyReLU()
self.leaky_relu = torch.nn.LeakyReLU()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=-1)
size = self.h_dim
self.p1 = torch.nn.Linear(self.in_dim, size)
self.p2 = torch.nn.Linear(size, size)
self.v1 = torch.nn.Linear(self.in_dim, size)
self.v2 = torch.nn.Linear(size, size)
self.pi = torch.nn.Linear(size, self.out_dim)
self.value = torch.nn.Linear(size, 1)
self.critic_loss = torch.nn.MSELoss()
def forward(self, x):
out = torch.Tensor(x).float()
p = self.p1(out)
p = self.tanh(p)
p = self.p2(p)
p = self.tanh(p)
p = self.pi(p)
pi = self.softmax(p)
v = self.v1(out)
v = self.tanh(v)
v = self.v2(v)
v = self.tanh(v)
v = self.value(v)
return pi, v
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'actor_lr': 4, 'epsilon': 4, 'in_dim': 4, 'h_dim': 4,
'out_dim': 4}]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/zu/czukwmrqjy753kgccmxfno5mifqkwe3ujuv24gi2543qttxvepnp.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1083392
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 8464) % 32
x0 = xindex % 8464
x4 = (xindex // 8464)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (8480*x4)), tmp4, None)
''', device_str='cuda')
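# Fused conv-bias + ReLU epilogue. Note the store stride: each 92x92 output
# plane holds 8464 elements, but planes are written 8480 apart. Inductor pads
# the plane stride (presumably for aligned accesses); the max-pool kernel
# below reads back with the same 8480 stride.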
# kernel path: runs/run_shard_4/inductor_cache/3c/c3cdfchgiifuu4gutpnkzpk7iihkqjlpf3qh5jbkrhclj5pdx3ht.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 270848
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 46
x1 = (xindex // 46) % 46
x2 = (xindex // 2116)
x3 = xindex % 2116
tmp0 = tl.load(in_ptr0 + ((2*x0) + (184*x1) + (8480*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (184*x1) + (8480*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (92 + (2*x0) + (184*x1) + (8480*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (93 + (2*x0) + (184*x1) + (8480*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3 + (2144*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x3 + (2176*x2)), tmp16, xmask)
''', device_str='cuda')
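# Editor's note on the kernel above: out_ptr1 stores, as int8, the argmax
# position of each 2x2 pooling window in row-major order (0 = top-left,
# 1 = top-right, 2 = bottom-left, 3 = bottom-right); the backward pass of
# max_pool2d uses these offsets to route gradients. Strides such as 8480,
# 2144 and 2176 are alignment-padded buffer strides picked by Inductor; the
# logical sizes are 92 * 92 = 8464 and 46 * 46 = 2116.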
# kernel path: runs/run_shard_4/inductor_cache/27/c27fu4if2dbg6r3xuyzvu4ns7tejhxlg7hyti2lbvsinmlpt2cop.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 451584
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 1764) % 64
x0 = xindex % 1764
x4 = (xindex // 1764)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + (1792*x4)), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3d/c3dpro5ff2oc7ig2ayzurdvfy7a5iy667pfpnj6536wstwf5kfzw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 112896
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 21
x1 = (xindex // 21) % 21
x4 = (xindex // 441)
x3 = (xindex // 28224)
x5 = xindex % 28224
x6 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (84*x1) + (1792*x4)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (84*x1) + (1792*x4)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (42 + (2*x0) + (84*x1) + (1792*x4)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (43 + (2*x0) + (84*x1) + (1792*x4)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x5 + (28288*x3)), tmp15, xmask)
tl.store(out_ptr1 + (x6), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xc/cxc6b6yaoqrxygbhhvqslfh3evd2idz6ndtwi246ntlpvok4xlz7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_3 => relu_2
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 1000
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gc/cgcg5pmk2v6hznoxiobdlf6ffbpauayxdfl3coorau4syfigxt7c.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 96, 96), (9216, 9216, 96, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (1000, 28224), (28224, 1))
assert_size_stride(primals_7, (1000, ), (1, ))
assert_size_stride(primals_8, (500, 1000), (1000, 1))
assert_size_stride(primals_9, (500, ), (1, ))
assert_size_stride(primals_10, (136, 500), (500, 1))
assert_size_stride(primals_11, (136, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 92, 92), (270848, 8464, 92, 1))
buf1 = empty_strided_cuda((4, 32, 92, 92), (271360, 8480, 92, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf0, primals_2, buf1, 1083392, grid=grid(1083392), stream=stream0)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 32, 46, 46), (68608, 2144, 46, 1), torch.float32)
buf3 = empty_strided_cuda((4, 32, 46, 46), (69632, 2176, 46, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 270848, grid=grid(270848), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 42, 42), (112896, 1764, 42, 1))
buf5 = empty_strided_cuda((4, 64, 42, 42), (114688, 1792, 42, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, buf5, 451584, grid=grid(451584), stream=stream0)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 64, 21, 21), (28288, 441, 21, 1), torch.int8)
buf7 = empty_strided_cuda((4, 64, 21, 21), (28224, 441, 21, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 112896, grid=grid(112896), stream=stream0)
buf8 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf7, (4, 28224), (28224, 1), 0), reinterpret_tensor(primals_6, (28224, 1000), (1, 28224), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, primals_7, 4000, grid=grid(4000), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (1000, 500), (1, 1000), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 2000, grid=grid(2000), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 136), (136, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (500, 136), (1, 500), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 28224), (28224, 1), 0), buf9, buf11, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 96, 96), (9216, 9216, 96, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1000, 28224), (28224, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1000, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((500, 1000), (1000, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((500, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((136, 500), (500, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((136, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc5 = nn.Linear(64 * 21 * 21, 1000)
self.fc6 = nn.Linear(1000, 500)
self.fc7 = nn.Linear(500, 136)
self.fc6_drop = nn.Dropout(p=0.4)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 21 * 21)
x = F.relu(self.fc5(x))
x = F.relu(self.fc6(x))
x = self.fc7(x)
return x
def get_inputs():
return [torch.rand([4, 1, 96, 96])]
def get_init_inputs():
return [[], {}]
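# Editor's sketch (not part of the captured source): conv1 (5x5, no padding)
# maps 96 -> 92, pool1 halves it to 46, conv2 maps 46 -> 42, pool2 halves it
# to 21, hence fc5's 64 * 21 * 21 = 28224 input features.
_net = Net()
assert _net(torch.rand(4, 1, 96, 96)).shape == (4, 136)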
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)  # all-true mask (xnumel divides evenly into XBLOCK tiles), so loads/stores below pass mask=None
x3 = xindex
x1 = xindex // 8464 % 32
x0 = xindex % 8464
x4 = xindex // 8464
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 8480 * x4), tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 270848
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 46
x1 = xindex // 46 % 46
x2 = xindex // 2116
x3 = xindex % 2116
tmp0 = tl.load(in_ptr0 + (2 * x0 + 184 * x1 + 8480 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 184 * x1 + 8480 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (92 + 2 * x0 + 184 * x1 + 8480 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (93 + 2 * x0 + 184 * x1 + 8480 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3 + 2144 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x3 + 2176 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 451584
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 1764 % 64
x0 = xindex % 1764
x4 = xindex // 1764
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x0 + 1792 * x4), tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 112896
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 21
x1 = xindex // 21 % 21
x4 = xindex // 441
x3 = xindex // 28224
x5 = xindex % 28224
x6 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 84 * x1 + 1792 * x4), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 84 * x1 + 1792 * x4), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (42 + 2 * x0 + 84 * x1 + 1792 * x4), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (43 + 2 * x0 + 84 * x1 + 1792 * x4), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x5 + 28288 * x3), tmp15, xmask)
tl.store(out_ptr1 + x6, tmp16, xmask)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 4000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 1000
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 2000
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 500
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 96, 96), (9216, 9216, 96, 1))
assert_size_stride(primals_4, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (1000, 28224), (28224, 1))
assert_size_stride(primals_7, (1000,), (1,))
assert_size_stride(primals_8, (500, 1000), (1000, 1))
assert_size_stride(primals_9, (500,), (1,))
assert_size_stride(primals_10, (136, 500), (500, 1))
assert_size_stride(primals_11, (136,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 92, 92), (270848, 8464, 92, 1))
buf1 = empty_strided_cuda((4, 32, 92, 92), (271360, 8480, 92, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1083392)](buf0, primals_2,
buf1, 1083392, XBLOCK=1024, num_warps=4, num_stages=1)
del buf0
del primals_2
buf2 = empty_strided_cuda((4, 32, 46, 46), (68608, 2144, 46, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 32, 46, 46), (69632, 2176, 46, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(270848)](buf1, buf2,
buf3, 270848, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 42, 42), (112896, 1764, 42, 1))
buf5 = empty_strided_cuda((4, 64, 42, 42), (114688, 1792, 42, 1),
torch.float32)
triton_poi_fused_convolution_relu_2[grid(451584)](buf4, primals_5,
buf5, 451584, XBLOCK=512, num_warps=8, num_stages=1)
del buf4
del primals_5
buf6 = empty_strided_cuda((4, 64, 21, 21), (28288, 441, 21, 1),
torch.int8)
buf7 = empty_strided_cuda((4, 64, 21, 21), (28224, 441, 21, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_3[grid(112896)](buf5, buf6,
buf7, 112896, XBLOCK=512, num_warps=8, num_stages=1)
buf8 = empty_strided_cuda((4, 1000), (1000, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf7, (4, 28224), (28224, 1),
0), reinterpret_tensor(primals_6, (28224, 1000), (1, 28224), 0),
out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(4000)](buf9, primals_7, 4000, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 500), (500, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_8, (1000, 500),
(1, 1000), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(2000)](buf11, primals_9, 2000, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 136), (136, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (500, 136), (1, 500), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 28224), (28224, 1), 0), buf9,
buf11, primals_10, primals_8, primals_6)
class NetNew(nn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc5 = nn.Linear(64 * 21 * 21, 1000)
self.fc6 = nn.Linear(1000, 500)
self.fc7 = nn.Linear(500, 136)
self.fc6_drop = nn.Dropout(p=0.4)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc5.weight
primals_7 = self.fc5.bias
primals_8 = self.fc6.weight
primals_9 = self.fc6.bias
primals_10 = self.fc7.weight
primals_11 = self.fc7.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
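# Editor's note on the compiled wrapper: the two convolutions and the three
# matmuls (mm, mm, addmm) are handed to ATen (typically cuDNN/cuBLAS) via
# extern_kernels, while the Triton kernels fuse only the pointwise bias-add
# + ReLU and the 2x2 max-pooling. fc6_drop is defined but never called in
# forward, so no dropout appears in the compiled graph.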
| piyushpathak03/Facial-key-point-detection | Net | false | 7,609 | [
"Apache-2.0"
] | 1 | 863eeeac50c46befb17ecf7610cd341ea0e65291 | https://github.com/piyushpathak03/Facial-key-point-detection/tree/863eeeac50c46befb17ecf7610cd341ea0e65291 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 5)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 64, 5)
self.pool2 = nn.MaxPool2d(2, 2)
self.fc5 = nn.Linear(64 * 21 * 21, 1000)
self.fc6 = nn.Linear(1000, 500)
self.fc7 = nn.Linear(500, 136)
self.fc6_drop = nn.Dropout(p=0.4)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = x.view(-1, 64 * 21 * 21)
x = F.relu(self.fc5(x))
x = F.relu(self.fc6(x))
x = self.fc7(x)
return x
def get_inputs():
return [torch.rand([4, 1, 96, 96])]
def get_init_inputs():
return []
|
BertImagePooler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# pooled_output => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xk/cxkfjvxcrwrocrik25vel4gb2spp4jrbijo33ra4mgkw3hn2qgah.py
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# pooled_output => add
# pooled_output_1 => relu
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
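# Editor's note: besides overwriting in_out_ptr0 with the ReLU output, the
# kernel above materializes a bool mask (tmp6 = relu_out <= 0) that
# aten.threshold_backward later uses to zero gradients where the ReLU
# clamped.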
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pooled_output], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [pooled_output, pooled_output_1], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf2, primals_3, buf3, 64, grid=grid(64), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.optim
import torch.utils.data
from torch import nn
import torch
class BertImagePooler(nn.Module):
def __init__(self, config):
super(BertImagePooler, self).__init__()
self.dense = nn.Linear(config.v_hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(v_hidden_size=4, bi_hidden_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.optim
import torch.utils.data
from torch import nn
import torch
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3
class BertImagePoolerNew(nn.Module):
def __init__(self, config):
super(BertImagePoolerNew, self).__init__()
self.dense = nn.Linear(config.v_hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ChoiIseungil/vilbert-multi-task | BertImagePooler | false | 7,610 | [
"MIT"
] | 1 | 37d14b9aed9c48117a820e05157c7ccd3dd20d5b | https://github.com/ChoiIseungil/vilbert-multi-task/tree/37d14b9aed9c48117a820e05157c7ccd3dd20d5b | from _paritybench_helpers import _mock_config
import torch
import torch.optim
import torch.utils.data
from torch import nn
import torch
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.v_hidden_size, config.bi_hidden_size)
self.activation = nn.ReLU()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Net2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/k3/ck34u2ihhbnbcv7k6gttuj4jtoheyxbtbsnyfudkwvvrmxpo2zpp.py
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
# clamp => clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.testing._internal.common_utils import *
class MyRelu2(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
class Net2(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return MyRelu2.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
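# Editor's sketch: MyRelu2 saves the input but never defines backward(), so
# autograd raises if anything is differentiated through it. A complete
# version (hypothetical, not in the captured source) could be:
class _MyRelu2WithBackward(torch.autograd.Function):

    @staticmethod
    def forward(ctx, input):
        ctx.save_for_backward(input)
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        return grad_output * (input > 0).to(grad_output.dtype)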
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.testing._internal.common_utils import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MyRelu2(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
class Net2New(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| LexcaliburR/notebook | Net2 | false | 7,611 | [
"MIT"
] | 1 | 84a8f3801dff20d07caa0ed2584e722656fb5726 | https://github.com/LexcaliburR/notebook/tree/84a8f3801dff20d07caa0ed2584e722656fb5726 | import torch
import torch.nn as nn
from torch.testing._internal.common_utils import *
class MyRelu2(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.clamp(min=0)
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return MyRelu2.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Conv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dj/cdjv22ecg2fa3vl42zmofymqajhwcoeyhd4zdqtxfjt47oopzskx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %view), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
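# Editor's note: in the kernel above, x1 = (xindex // 16) % 4 recovers the
# channel of each element of the (4, 4, 4, 4) output (16 = H * W), so
# in_ptr0 -- the bias, viewed as [1, C, 1, 1] in the graph -- is
# broadcast-added per channel.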
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
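# Worked example (editor's sketch): a separable [1, 2, 1] tap becomes a
# normalized 3x3 binomial filter.
_k = _setup_kernel([1, 2, 1])
assert _k.shape == (3, 3)
assert np.allclose(_k * 16.0, np.outer([1, 2, 1], [1, 2, 1]))
assert np.isclose(_k.sum(), 1.0)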
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
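# Editor's sketch: with up = down = 1 this reduces to a plain 2D FIR filter,
# and the output size follows out = (in * up + pad0 + pad1 - kernel) // down + 1.
_x = torch.rand(1, 3, 8, 8)
_fir = torch.tensor(_setup_kernel([1, 3, 3, 1]))
assert upfirdn2d_native(_x, _fir, 1, 1, 1, 1, 2, 1, 2, 1).shape == (1, 3, 8, 8)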
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
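# Minimal CPU usage sketch for upfirdn2d (shapes, filter, and padding are demo
# assumptions): 2x nearest-neighbor upsampling with a box filter.
def _demo_upfirdn2d():
    x = torch.randn(1, 3, 8, 8)
    k = torch.tensor(_setup_kernel([1, 1]) * 4)  # gain = factor ** 2
    y = upfirdn2d(x, k, up=2, down=1, pad=(1, 0))
    assert y.shape == (1, 3, 16, 16)
    return y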
def conv_downsample_2d(x, w, k=None, factor=2, gain=1):
"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations.
    The fused op is considerably more efficient than performing the same
    calculation using standard TensorFlow ops. It supports gradients of
    arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
_outC, _inC, convH, convW = w.shape
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor + (convW - 1)
s = [factor, factor]
x = upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2, p //
2))
return F.conv2d(x, w, stride=s, padding=0)
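# Hedged usage sketch for conv_downsample_2d (all shapes are demo
# assumptions): a 3x3 convolution fused with 2x downsampling.
def _demo_conv_downsample_2d():
    x = torch.randn(1, 4, 16, 16)
    w = torch.randn(8, 4, 3, 3)  # [outC, inC, convH, convW]
    y = conv_downsample_2d(x, w, factor=2)
    assert y.shape == (1, 8, 8, 8)
    return y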
def _shape(x, dim):
return x.shape[dim]
def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
Padding is performed only once at the beginning, not between the
operations.
    The fused op is considerably more efficient than performing the same
    calculation using standard TensorFlow ops. It supports gradients of
    arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
assert len(w.shape) == 4
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * factor ** 2)
p = k.shape[0] - factor - (convW - 1)
    stride = (factor, factor)  # 2-D stride expected by F.conv_transpose2d
    output_shape = ((_shape(x, 2) - 1) * factor + convH,
        (_shape(x, 3) - 1) * factor + convW)
    output_padding = (output_shape[0] - (_shape(x, 2) - 1) * stride[0] -
        convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW)
assert output_padding[0] >= 0 and output_padding[1] >= 0
num_groups = _shape(x, 1) // inC
w = torch.reshape(w, (num_groups, -1, inC, convH, convW))
    w = torch.flip(w, [3, 4]).permute(0, 2, 1, 3, 4)  # tensors lack negative-step slicing
w = torch.reshape(w, (num_groups * inC, -1, convH, convW))
x = F.conv_transpose2d(x, w, stride=stride, output_padding=
output_padding, padding=0)
return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 +
factor - 1, p // 2 + 1))
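# Hedged usage sketch for upsample_conv_2d (all shapes are demo assumptions):
# 2x upsampling fused with a 3x3 convolution doubles the spatial size.
def _demo_upsample_conv_2d():
    x = torch.randn(1, 4, 8, 8)
    w = torch.randn(8, 4, 3, 3)  # [outC, inC, convH, convW]
    y = upsample_conv_2d(x, w, factor=2)
    assert y.shape == (1, 8, 16, 16)
    return y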
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Conv2d(nn.Module):
"""Conv2d layer with optimal upsampling and downsampling (StyleGAN2)."""
def __init__(self, in_ch, out_ch, kernel, up=False, down=False,
resample_kernel=(1, 3, 3, 1), use_bias=True, kernel_init=None):
super().__init__()
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel))
if kernel_init is not None:
self.weight.data = kernel_init(self.weight.data.shape)
if use_bias:
self.bias = nn.Parameter(torch.zeros(out_ch))
self.up = up
self.down = down
self.resample_kernel = resample_kernel
self.kernel = kernel
self.use_bias = use_bias
def forward(self, x):
if self.up:
x = upsample_conv_2d(x, self.weight, k=self.resample_kernel)
elif self.down:
x = conv_downsample_2d(x, self.weight, k=self.resample_kernel)
else:
x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2)
if self.use_bias:
x = x + self.bias.reshape(1, -1, 1, 1)
return x
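# Hedged usage sketch for the Conv2d wrapper (channel counts and sizes are
# demo assumptions): the plain, up, and down variants share one interface.
def _demo_conv2d_module():
    x = torch.randn(1, 4, 8, 8)
    assert Conv2d(4, 8, kernel=3)(x).shape == (1, 8, 8, 8)
    assert Conv2d(4, 8, kernel=3, up=True)(x).shape == (1, 8, 16, 16)
    assert Conv2d(4, 8, kernel=3, down=True)(x).shape == (1, 8, 4, 4)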
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4, 'kernel': 1}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
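# Hedged eager-mode sketch of the fused kernel above (illustration only, not
# part of the generated module): it broadcasts a per-channel bias over an
# NCHW tensor; the index x1 = xindex // 16 % 4 selects each element's channel.
def _eager_fused_add(x, bias):
    return x + bias.view(1, -1, 1, 1)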
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
    out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
def conv_downsample_2d(x, w, k=None, factor=2, gain=1):
"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations.
    The fused op is considerably more efficient than performing the same
    calculation using standard TensorFlow ops. It supports gradients of
    arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
_outC, _inC, convH, convW = w.shape
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor + (convW - 1)
s = [factor, factor]
x = upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2, p //
2))
return F.conv2d(x, w, stride=s, padding=0)
def _shape(x, dim):
return x.shape[dim]
def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
Padding is performed only once at the beginning, not between the
operations.
    The fused op is considerably more efficient than performing the same
    calculation using standard TensorFlow ops. It supports gradients of
    arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
nearest-neighbor upsampling.
factor: Integer upsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H * factor, W * factor]` or
`[N, H * factor, W * factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
assert len(w.shape) == 4
convH = w.shape[2]
convW = w.shape[3]
inC = w.shape[1]
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * (gain * factor ** 2)
p = k.shape[0] - factor - (convW - 1)
    stride = (factor, factor)  # 2-D stride expected by F.conv_transpose2d
    output_shape = ((_shape(x, 2) - 1) * factor + convH,
        (_shape(x, 3) - 1) * factor + convW)
    output_padding = (output_shape[0] - (_shape(x, 2) - 1) * stride[0] -
        convH, output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW)
assert output_padding[0] >= 0 and output_padding[1] >= 0
num_groups = _shape(x, 1) // inC
w = torch.reshape(w, (num_groups, -1, inC, convH, convW))
    w = torch.flip(w, [3, 4]).permute(0, 2, 1, 3, 4)  # tensors lack negative-step slicing
w = torch.reshape(w, (num_groups * inC, -1, convH, convW))
x = F.conv_transpose2d(x, w, stride=stride, output_padding=
output_padding, padding=0)
return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 +
factor - 1, p // 2 + 1))
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Conv2dNew(nn.Module):
"""Conv2d layer with optimal upsampling and downsampling (StyleGAN2)."""
def __init__(self, in_ch, out_ch, kernel, up=False, down=False,
resample_kernel=(1, 3, 3, 1), use_bias=True, kernel_init=None):
super().__init__()
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel))
if kernel_init is not None:
self.weight.data = kernel_init(self.weight.data.shape)
if use_bias:
self.bias = nn.Parameter(torch.zeros(out_ch))
self.up = up
self.down = down
self.resample_kernel = resample_kernel
self.kernel = kernel
self.use_bias = use_bias
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| samsartor/score_sde | Conv2d | false | 7,612 | ["Apache-2.0"] | 1 | d25c8d092a68d643c796d771c55f80075aa041d1 | https://github.com/samsartor/score_sde/tree/d25c8d092a68d643c796d771c55f80075aa041d1 | from torch.autograd import Function
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def _setup_kernel(k):
k = np.asarray(k, dtype=np.float32)
if k.ndim == 1:
k = np.outer(k, k)
k /= np.sum(k)
assert k.ndim == 2
assert k.shape[0] == k.shape[1]
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
    out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
def conv_downsample_2d(x, w, k=None, factor=2, gain=1):
"""Fused `tf.nn.conv2d()` followed by `downsample_2d()`.
Padding is performed only once at the beginning, not between the operations.
    The fused op is considerably more efficient than performing the same
    calculation using standard TensorFlow ops. It supports gradients of
    arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
w: Weight tensor of the shape `[filterH, filterW, inChannels,
outChannels]`. Grouped convolution can be performed by `inChannels =
x.shape[0] // numGroups`.
k: FIR filter of the shape `[firH, firW]` or `[firN]`
(separable). The default is `[1] * factor`, which corresponds to
average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
"""
assert isinstance(factor, int) and factor >= 1
_outC, _inC, convH, convW = w.shape
assert convW == convH
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor + (convW - 1)
s = [factor, factor]
x = upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2, p //
2))
return F.conv2d(x, w, stride=s, padding=0)
def _shape(x, dim):
return x.shape[dim]
def upsample_conv_2d(x, w, k=None, factor=2, gain=1):
"""Fused `upsample_2d()` followed by `tf.nn.conv2d()`.
Padding is performed only once at the beginning, not between the
operations.
    The fused op is considerably more efficient than performing the same
    calculation using standard TensorFlow ops. It supports gradients of
    arbitrary order.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
C]`.
# ... truncated (>4000 chars) for memory efficiency |
BertOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ai/cai32p2ssjvpyulvuzcicdszqe3thbavgxn4jeed6uatjnl7yq2s.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/nk/cnkbkukjfarsysqlaadkg24xmqibk3adq5p7jyfnt6k6loydbn2r.py
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# hidden_states_2 => add_1, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mn/cmntyljhuirhsdjg2yosgzllpkpxqedxgoyk6gunquq2rf3kl7u5.py
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# hidden_states_2 => add_1, add_2, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-12), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_6), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_2, primals_4, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_5, primals_6, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_6
return (buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, )
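# Hedged eager-mode reference for what `call` computes (a sketch, not part of
# the generated module): dense projection, bias + residual add, then
# LayerNorm with eps=1e-12, matching the fused kernels above.
def _eager_reference(w, b, hidden, residual, gamma, beta):
    x = hidden @ w.t() + b + residual
    mean = x.mean(-1, keepdim=True)
    var = x.var(-1, unbiased=False, keepdim=True)
    return (x - mean) * torch.rsqrt(var + 1e-12) * gamma + beta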
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn
import torch.nn as nn
class BertOutput(nn.Module):
"""BERT output layer.
Based on: BERT (pytorch-transformer)
https://github.com/huggingface/transformers
"""
def __init__(self, config) ->None:
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
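# Minimal usage sketch (the mock-config values below mirror get_init_inputs
# and are demo assumptions):
def _demo_bert_output():
    layer = BertOutput(_mock_config(hidden_size=4, hidden_dropout_prob=0.5))
    hidden = torch.rand(2, 8, 4)
    residual = torch.rand(2, 8, 4)
    assert layer(hidden, residual).shape == (2, 8, 4)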
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_2, primals_4, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(256)](buf1, buf2, buf3,
primals_5, primals_6, buf4, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
del buf3
del primals_6
return buf4, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf1
class BertOutputNew(nn.Module):
"""BERT output layer.
Based on: BERT (pytorch-transformer)
https://github.com/huggingface/transformers
"""
def __init__(self, config) ->None:
super(BertOutputNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| Erotemic/MONAI | BertOutput | false | 7,613 | ["Apache-2.0"] | 1 | a9cd2d88168107281a2abcc2f63efaed80580e79 | https://github.com/Erotemic/MONAI/tree/a9cd2d88168107281a2abcc2f63efaed80580e79 | from _paritybench_helpers import _mock_config
import torch
import torch.nn
import torch.nn as nn
class Model(nn.Module):
"""BERT output layer.
Based on: BERT (pytorch-transformer)
https://github.com/huggingface/transformers
"""
def __init__(self, config) ->None:
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=
0.5)}]
|
BERTLowRank | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_aug_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
# Source node to ATen node mapping:
# add => add
# erf => erf
# hidden_states_aug_1 => mul_1
# mul => mul
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_aug], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, hidden_states_aug_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
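# Quick sanity sketch (not in the original file): this erf-based gelu matches
# PyTorch's exact (non-tanh) formulation.
def _demo_gelu():
    x = torch.linspace(-3, 3, 7)
    assert torch.allclose(gelu(x), nn.functional.gelu(x), atol=1e-06)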
class BERTLowRank(nn.Module):
def __init__(self, config, extra_dim=None):
super(BERTLowRank, self).__init__()
if config.extra_dim:
self.aug_dense = nn.Linear(config.hidden_size, config.extra_dim)
self.aug_dense2 = nn.Linear(config.extra_dim, config.hidden_size)
else:
self.aug_dense = nn.Linear(config.hidden_size, config.
hidden_size_aug)
self.aug_dense2 = nn.Linear(config.hidden_size_aug, config.
hidden_size)
self.config = config
self.hidden_act_fn = gelu
def forward(self, hidden_states, attention_mask=None):
hidden_states_aug = self.aug_dense(hidden_states)
hidden_states_aug = self.hidden_act_fn(hidden_states_aug)
hidden_states = self.aug_dense2(hidden_states_aug)
return hidden_states
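# Minimal usage sketch (the mock-config values below mirror get_init_inputs
# and are demo assumptions):
def _demo_bert_low_rank():
    layer = BERTLowRank(_mock_config(extra_dim=4, hidden_size=4))
    assert layer(torch.rand(2, 8, 4)).shape == (2, 8, 4)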
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(extra_dim=4, hidden_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class BERTLowRankNew(nn.Module):
def __init__(self, config, extra_dim=None):
super(BERTLowRankNew, self).__init__()
if config.extra_dim:
self.aug_dense = nn.Linear(config.hidden_size, config.extra_dim)
self.aug_dense2 = nn.Linear(config.extra_dim, config.hidden_size)
else:
self.aug_dense = nn.Linear(config.hidden_size, config.
hidden_size_aug)
self.aug_dense2 = nn.Linear(config.hidden_size_aug, config.
hidden_size)
self.config = config
self.hidden_act_fn = gelu
def forward(self, input_0):
primals_1 = self.aug_dense.weight
primals_2 = self.aug_dense.bias
primals_4 = self.aug_dense2.weight
primals_5 = self.aug_dense2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| DAQuestionAnswering/Bert-n-Pals | BERTLowRank | false | 7,614 | ["MIT"] | 1 | d5a288b9ac62259e70c249635108ba3906e19f00 | https://github.com/DAQuestionAnswering/Bert-n-Pals/tree/d5a288b9ac62259e70c249635108ba3906e19f00 | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class Model(nn.Module):
def __init__(self, config, extra_dim=None):
super().__init__()
if config.extra_dim:
self.aug_dense = nn.Linear(config.hidden_size, config.extra_dim)
self.aug_dense2 = nn.Linear(config.extra_dim, config.hidden_size)
else:
self.aug_dense = nn.Linear(config.hidden_size, config.
hidden_size_aug)
self.aug_dense2 = nn.Linear(config.hidden_size_aug, config.
hidden_size)
self.config = config
self.hidden_act_fn = gelu
def forward(self, hidden_states, attention_mask=None):
hidden_states_aug = self.aug_dense(hidden_states)
hidden_states_aug = self.hidden_act_fn(hidden_states_aug)
hidden_states = self.aug_dense2(hidden_states_aug)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(extra_dim=4, hidden_size=4)}]
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/e4/ce4ut4ww7buakyddawvmwd3rmw676rn5srdsnhgej6ymkbabmvjf.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => add, add_1, mul, mul_1, repeat, rsqrt, sub, var_mean
# x_2 => gt, mul_2, where
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4]), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_3), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul_2), kwargs = {})
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x0 % 64), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.2
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp32, xmask)
tl.store(out_ptr3 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
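# Note: per (sample, channel) row of 16 spatial elements, the kernel above
# fuses the affine instance norm with LeakyReLU(0.2):
#     y = leaky_relu((x - mean) * rsqrt(var + 1e-05) * gamma + beta, 0.2)
# where gamma/beta are the per-channel weights repeated across the batch.
# A minimal eager-mode sketch of the same math, for reference only; the name
# and signature below are illustrative, not part of the generated module:
def _instance_norm_leaky_relu_ref(x, gamma, beta, eps=1e-05, slope=0.2):
    # Stats over each (n, c) spatial slice, matching
    # aten._native_batch_norm_legit on the (1, N*C, H, W) view with correction=0.
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    y = (x - mean) * torch.rsqrt(var + eps)
    y = y * gamma.view(1, -1, 1, 1) + beta.view(1, -1, 1, 1)
    return torch.where(y > 0, y, y * slope)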
# kernel path: runs/run_shard_4/inductor_cache/jr/cjrkvgqsrkrzes3byvkb3awyeg2zhynh6a2iawul33larixsuldg.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# x_4 => add_2, add_3, mul_3, mul_4, repeat_2, rsqrt_1, sub_1, var_mean_1
# x_5 => gt_1, mul_5, where_1
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_6, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %getitem_3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %unsqueeze_5), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %unsqueeze_7), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_6, %mul_5), kwargs = {})
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 512
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (x0 % 128), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (16*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x0 % 128), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.2
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r1 + (16*x0)), tmp32, xmask)
tl.store(out_ptr3 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gv/cgvfdus7jwptj4n5skzvebfi3jfv4w7qsrr4jlwca3gfocsavk3y.py
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# x_7 => add_4, add_5, mul_6, mul_7, repeat_4, rsqrt_2, sub_2, var_mean_2
# x_8 => gt_2, mul_8, where_2
# Graph fragment:
# %repeat_4 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_9, [4]), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_10, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_10, %getitem_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %unsqueeze_9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_11), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_11, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 0.2), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %view_11, %mul_8), kwargs = {})
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + (x0 % 64), xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 64.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.2
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r1 + (64*x0)), tmp32, xmask)
tl.store(out_ptr3 + (x0), tmp23, xmask)
tl.store(out_ptr1 + (x0), tmp11, xmask)
''', device_str='cuda')
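# Note: kernels _1 and _2 above repeat the _0 pattern; Inductor specializes
# one kernel per static shape, here 512 rows x 16 spatial elements and
# 256 rows x 64 spatial elements respectively.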
# kernel path: runs/run_shard_4/inductor_cache/2r/c2rgxkvktgswate5x7dgwmb5sjxwvmvzazgjuhvftuu4sgwmpwyu.py
# Topologically Sorted Source Nodes: [x_13, x_14], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# x_13 => add_8, add_9, mul_12, mul_13, repeat_8, rsqrt_4, sub_4, var_mean_4
# x_14 => gt_4, mul_14, where_4
# Graph fragment:
# %repeat_8 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_15, [4]), kwargs = {})
# %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_20, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_20, %getitem_9), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %rsqrt_4), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_12, %unsqueeze_17), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_13, %unsqueeze_19), kwargs = {})
# %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_21, 0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_21, 0.2), kwargs = {})
# %where_4 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %view_21, %mul_14), kwargs = {})
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (x0 % 32), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (256*x0)), None)
tmp23 = tl.load(in_ptr2 + (x0 % 32), None, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.full([1], 256, tl.int32)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 / tmp8
tmp10 = tmp2 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp1 - tmp9
tmp16 = 256.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp15 * tmp20
tmp22 = tmp21 * tmp0
tmp24 = tmp22 + tmp23
tmp25 = 0.0
tmp26 = tmp24 > tmp25
tmp27 = 0.2
tmp28 = tmp24 * tmp27
tmp29 = tl.where(tmp26, tmp24, tmp28)
tl.store(out_ptr0 + (x0), tmp0, None)
tl.store(in_out_ptr0 + (r1 + (256*x0)), tmp29, None)
tl.store(out_ptr3 + (x0), tmp20, None)
tl.store(out_ptr1 + (x0), tmp9, None)
''', device_str='cuda')
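# Note: kernels _3 and _4 use the same fusion but are emitted with XBLOCK
# fixed to 1 ('no_x_dim' in inductor_meta): each program handles a single
# (sample, channel) row and reduces its 256 (or 1024, below) spatial elements
# in one persistent block, so the x/r masks degenerate to all-True.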
# kernel path: runs/run_shard_4/inductor_cache/vc/cvcyexgjzbawzvi6qrlbzazrqv5uuq4va3sn4vms6b6t2ach7rfn.py
# Topologically Sorted Source Nodes: [x_19, x_20], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# x_19 => add_12, add_13, mul_18, mul_19, repeat_12, rsqrt_6, sub_6, var_mean_6
# x_20 => gt_6, mul_20, where_6
# Graph fragment:
# %repeat_12 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_21, [4]), kwargs = {})
# %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-05), kwargs = {})
# %rsqrt_6 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_12,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_30, %getitem_13), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %rsqrt_6), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_18, %unsqueeze_25), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_19, %unsqueeze_27), kwargs = {})
# %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_31, 0), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, 0.2), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %view_31, %mul_20), kwargs = {})
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (x0 % 32), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + (1024*x0)), None)
tmp23 = tl.load(in_ptr2 + (x0 % 32), None, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.full([1], 1024, tl.int32)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 / tmp8
tmp10 = tmp2 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp1 - tmp9
tmp16 = 1024.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp15 * tmp20
tmp22 = tmp21 * tmp0
tmp24 = tmp22 + tmp23
tmp25 = 0.0
tmp26 = tmp24 > tmp25
tmp27 = 0.2
tmp28 = tmp24 * tmp27
tmp29 = tl.where(tmp26, tmp24, tmp28)
tl.store(out_ptr0 + (x0), tmp0, None)
tl.store(in_out_ptr0 + (r1 + (1024*x0)), tmp29, None)
tl.store(out_ptr3 + (x0), tmp20, None)
tl.store(out_ptr1 + (x0), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hk/chkmgqrf2g7aokarbesbyfxruktunzt7g7tyyi6esdpawylkyglh.py
# Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
# Source node to ATen node mapping:
# x_22 => add_14, add_15, mul_21, mul_22, repeat_14, rsqrt_7, sub_7, var_mean_7
# x_23 => gt_7, mul_23, where_7
# Graph fragment:
# %repeat_14 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_24, [4]), kwargs = {})
# %var_mean_7 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_35, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_14, 1e-05), kwargs = {})
# %rsqrt_7 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_14,), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_35, %getitem_15), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %rsqrt_7), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_21, %unsqueeze_29), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_22, %unsqueeze_31), kwargs = {})
# %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_36, 0), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_36, 0.2), kwargs = {})
# %where_7 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %view_36, %mul_23), kwargs = {})
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5 = async_compile.triton('triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[64, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 64
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 16), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x0), tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = triton_helpers.welford_reduce(
tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(
tmp3_mean, tmp3_m2, tmp3_weight, 1
)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tl.store(out_ptr1 + (x0), tmp3, xmask)
tmp15 = tl.load(in_ptr2 + (x0 % 16), xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + (4096*x0)), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + (x0), tmp26, xmask)
''', device_str='cuda')
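# Note: for the last norm the spatial reduction (rnumel = 4096) is too large
# for one persistent block, so the kernel above streams over r in RBLOCK
# chunks, combining partial statistics with Welford's algorithm
# (triton_helpers.welford_reduce / welford), then makes a second pass to apply
# the normalization. A minimal scalar sketch of the Welford update for
# reference; `_welford_ref` is an illustrative name, not part of this module:
def _welford_ref(values):
    mean, m2, count = 0.0, 0.0, 0
    for v in values:
        count += 1
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)
    # Returns the mean and biased variance (correction=0), as in the kernel.
    return mean, m2 / count if count else 0.0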
# kernel path: runs/run_shard_4/inductor_cache/to/ctopyqo3humni56cuwcrdh4cm3ly76mamnbambzyfhrxb36jpj37.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_25 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_8,), kwargs = {})
triton_poi_fused_sigmoid_6 = async_compile.triton('triton_poi_fused_sigmoid_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_6(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + (x0), tmp1, None)
''', device_str='cuda')
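# Note: the final sigmoid runs in place ('mutated_arg_names': ['in_out_ptr0']),
# overwriting the output of the last 1x1 convolution instead of allocating a
# new buffer.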
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26 = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, ), (1, ))
assert_size_stride(primals_5, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_6, (128, ), (1, ))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (64, ), (1, ))
assert_size_stride(primals_11, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_12, (64, ), (1, ))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_15, (32, ), (1, ))
assert_size_stride(primals_16, (32, ), (1, ))
assert_size_stride(primals_17, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_18, (32, ), (1, ))
assert_size_stride(primals_19, (32, ), (1, ))
assert_size_stride(primals_20, (32, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_21, (32, ), (1, ))
assert_size_stride(primals_22, (32, ), (1, ))
assert_size_stride(primals_23, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_24, (16, ), (1, ))
assert_size_stride(primals_25, (16, ), (1, ))
assert_size_stride(primals_26, (4, 16, 1, 1), (16, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf6 = empty_strided_cuda((1, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 4, 4), (1024, 16, 4, 1), 0); del buf6 # reuse
buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0.run(buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 16, grid=grid(256), stream=stream0)
del primals_3
del primals_4
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 4, 4), (2048, 16, 4, 1))
buf9 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf10 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf14 = empty_strided_cuda((1, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
buf15 = reinterpret_tensor(buf14, (4, 128, 4, 4), (2048, 16, 4, 1), 0); del buf14 # reuse
buf13 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1.run(buf15, primals_6, buf8, primals_7, buf9, buf10, buf13, 512, 16, grid=grid(512), stream=stream0)
del primals_6
del primals_7
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 8, 8), (4096, 64, 8, 1))
buf17 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf18 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf22 = empty_strided_cuda((1, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf23 = reinterpret_tensor(buf22, (4, 64, 8, 8), (4096, 64, 8, 1), 0); del buf22 # reuse
buf21 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2.run(buf23, primals_9, buf16, primals_10, buf17, buf18, buf21, 256, 64, grid=grid(256), stream=stream0)
del primals_10
del primals_9
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 64, 8, 8), (4096, 64, 8, 1))
buf25 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf26 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf30 = empty_strided_cuda((1, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
buf31 = reinterpret_tensor(buf30, (4, 64, 8, 8), (4096, 64, 8, 1), 0); del buf30 # reuse
buf29 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2.run(buf31, primals_12, buf24, primals_13, buf25, buf26, buf29, 256, 64, grid=grid(256), stream=stream0)
del primals_12
del primals_13
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 32, 16, 16), (8192, 256, 16, 1))
buf33 = empty_strided_cuda((128, ), (1, ), torch.float32)
buf34 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf38 = empty_strided_cuda((1, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf39 = reinterpret_tensor(buf38, (4, 32, 16, 16), (8192, 256, 16, 1), 0); del buf38 # reuse
buf37 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [x_13, x_14], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3.run(buf39, primals_15, buf32, primals_16, buf33, buf34, buf37, 128, 256, grid=grid(128), stream=stream0)
del primals_15
del primals_16
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 16, 16), (8192, 256, 16, 1))
buf41 = empty_strided_cuda((128, ), (1, ), torch.float32)
buf42 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf46 = empty_strided_cuda((1, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
buf47 = reinterpret_tensor(buf46, (4, 32, 16, 16), (8192, 256, 16, 1), 0); del buf46 # reuse
buf45 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [x_16, x_17], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3.run(buf47, primals_18, buf40, primals_19, buf41, buf42, buf45, 128, 256, grid=grid(128), stream=stream0)
del primals_18
del primals_19
# Topologically Sorted Source Nodes: [x_18], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_20, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf49 = empty_strided_cuda((128, ), (1, ), torch.float32)
buf50 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf54 = empty_strided_cuda((1, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
buf55 = reinterpret_tensor(buf54, (4, 32, 32, 32), (32768, 1024, 32, 1), 0); del buf54 # reuse
buf53 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [x_19, x_20], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4.run(buf55, primals_21, buf48, primals_22, buf49, buf50, buf53, 128, 1024, grid=grid(128), stream=stream0)
del primals_21
del primals_22
# Topologically Sorted Source Nodes: [x_21], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_23, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf57 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf58 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
buf62 = empty_strided_cuda((1, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf63 = reinterpret_tensor(buf62, (4, 16, 64, 64), (65536, 4096, 64, 1), 0); del buf62 # reuse
buf61 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_22, x_23], Original ATen: [aten.repeat, aten._native_batch_norm_legit, aten.leaky_relu]
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5.run(buf63, primals_24, buf56, primals_25, buf57, buf58, buf61, 64, 4096, grid=grid(64), stream=stream0)
del primals_24
del primals_25
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf65 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_6.run(buf65, 65536, grid=grid(65536), stream=stream0)
return (buf65, primals_1, primals_2, primals_5, primals_8, primals_11, primals_14, primals_17, primals_20, primals_23, primals_26, buf0, buf1, reinterpret_tensor(buf5, (256, ), (1, ), 0), buf7, buf8, buf9, reinterpret_tensor(buf13, (512, ), (1, ), 0), buf15, buf16, buf17, reinterpret_tensor(buf21, (256, ), (1, ), 0), buf23, buf24, buf25, reinterpret_tensor(buf29, (256, ), (1, ), 0), buf31, buf32, buf33, reinterpret_tensor(buf37, (128, ), (1, ), 0), buf39, buf40, buf41, reinterpret_tensor(buf45, (128, ), (1, ), 0), buf47, buf48, buf49, reinterpret_tensor(buf53, (128, ), (1, ), 0), buf55, buf56, buf57, reinterpret_tensor(buf61, (64, ), (1, ), 0), buf63, buf65, reinterpret_tensor(buf58, (1, 64, 1, 1), (64, 1, 1, 1), 0), reinterpret_tensor(buf50, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf42, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf34, (1, 128, 1, 1), (128, 1, 1, 1), 0), reinterpret_tensor(buf26, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf18, (1, 256, 1, 1), (256, 1, 1, 1), 0), reinterpret_tensor(buf10, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf2, (1, 256, 1, 1), (256, 1, 1, 1), 0), )
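# Note: besides the network output (buf65), `call` returns the inputs, weights
# and the per-row mean/rsqrt statistics computed above; these are the tensors
# saved so the backward pass can be replayed without recomputing the forward.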
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((32, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((32, 16, 4, 4), (256, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
class INConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, ins_n=True, bias=False):
super(INConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
class INDeConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, out_padding=0, dilation=1, groups=1, relu=True, ins_n=
True, bias=False):
super(INDeConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, output_padding=
out_padding, dilation=dilation, groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
class Decoder(nn.Module):
def __init__(self, img_channel):
super(Decoder, self).__init__()
self.deconv1 = INDeConv(in_planes=32, out_planes=64, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation1 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv2 = INDeConv(in_planes=64, out_planes=128, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation2 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv3 = INDeConv(in_planes=128, out_planes=64, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation3 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv4 = INDeConv(in_planes=64, out_planes=64, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation4 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv5 = INDeConv(in_planes=64, out_planes=32, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation5 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv6 = INDeConv(in_planes=32, out_planes=32, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation6 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv7 = INDeConv(in_planes=32, out_planes=32, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation7 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv8 = INDeConv(in_planes=32, out_planes=16, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation8 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.output = INConv(in_planes=16, out_planes=img_channel,
kernel_size=1, stride=1, padding=0, ins_n=False, relu=False)
self.activation9 = nn.Sigmoid()
def forward(self, x):
x = self.activation1(self.deconv1(x))
x = self.activation2(self.deconv2(x))
x = self.activation3(self.deconv3(x))
x = self.activation4(self.deconv4(x))
x = self.activation5(self.deconv5(x))
x = self.activation6(self.deconv6(x))
x = self.activation7(self.deconv7(x))
x = self.activation8(self.deconv8(x))
x = self.activation9(self.output(x))
return x
def get_inputs():
return [torch.rand([4, 32, 4, 4])]
def get_init_inputs():
return [[], {'img_channel': 4}]
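# A minimal usage sketch of the eager model above; `_example_usage` is an
# illustrative helper, not part of the original module. Shapes follow
# get_inputs() and get_init_inputs(): four stride-2 transposed convolutions
# upsample the 4x4 input to 64x64.
def _example_usage():
    model = Decoder(img_channel=4)
    out = model(torch.rand([4, 32, 4, 4]))
    assert out.shape == (4, 4, 64, 64)
    return out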
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
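# Note: the kernels below are standalone re-emissions of the seven kernels
# defined earlier, with the autotuning decorators and shape metadata stripped.
# Bare expressions such as `tl.full([XBLOCK, RBLOCK], True, tl.int1)` or
# `tl.where(xmask, tmp2, 0)` are leftovers of unused-variable elimination and
# have no effect.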
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x0 % 64, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.2
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp32, xmask)
tl.store(out_ptr3 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 512
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x0 % 128, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 16.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.2
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r1 + 16 * x0), tmp32, xmask)
tl.store(out_ptr3 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr2 + x0 % 64, xmask, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tl.where(xmask, tmp2, 0)
tmp5 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp2 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp1 - tmp11
tmp19 = 64.0
tmp20 = tmp17 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tmp24 = tmp18 * tmp23
tmp25 = tmp24 * tmp0
tmp27 = tmp25 + tmp26
tmp28 = 0.0
tmp29 = tmp27 > tmp28
tmp30 = 0.2
tmp31 = tmp27 * tmp30
tmp32 = tl.where(tmp29, tmp27, tmp31)
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r1 + 64 * x0), tmp32, xmask)
tl.store(out_ptr3 + x0, tmp23, xmask)
tl.store(out_ptr1 + x0, tmp11, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + x0 % 32, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 256 * x0), None)
tmp23 = tl.load(in_ptr2 + x0 % 32, None, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.full([1], 256, tl.int32)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 / tmp8
tmp10 = tmp2 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp1 - tmp9
tmp16 = 256.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp15 * tmp20
tmp22 = tmp21 * tmp0
tmp24 = tmp22 + tmp23
tmp25 = 0.0
tmp26 = tmp24 > tmp25
tmp27 = 0.2
tmp28 = tmp24 * tmp27
tmp29 = tl.where(tmp26, tmp24, tmp28)
tl.store(out_ptr0 + x0, tmp0, None)
tl.store(in_out_ptr0 + (r1 + 256 * x0), tmp29, None)
tl.store(out_ptr3 + x0, tmp20, None)
tl.store(out_ptr1 + x0, tmp9, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + x0 % 32, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (r1 + 1024 * x0), None)
tmp23 = tl.load(in_ptr2 + x0 % 32, None, eviction_policy='evict_last')
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = tl.broadcast_to(tmp2, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.full([1], 1024, tl.int32)
tmp8 = tmp7.to(tl.float32)
tmp9 = tmp6 / tmp8
tmp10 = tmp2 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = tmp1 - tmp9
tmp16 = 1024.0
tmp17 = tmp14 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tmp21 = tmp15 * tmp20
tmp22 = tmp21 * tmp0
tmp24 = tmp22 + tmp23
tmp25 = 0.0
tmp26 = tmp24 > tmp25
tmp27 = 0.2
tmp28 = tmp24 * tmp27
tmp29 = tl.where(tmp26, tmp24, tmp28)
tl.store(out_ptr0 + x0, tmp0, None)
tl.store(in_out_ptr0 + (r1 + 1024 * x0), tmp29, None)
tl.store(out_ptr3 + x0, tmp20, None)
tl.store(out_ptr1 + x0, tmp9, None)
@triton.jit
def triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel,
XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 64
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 16, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x0, tmp0, xmask)
tmp3_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp3_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp1 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3_mean_next, tmp3_m2_next, tmp3_weight_next = (triton_helpers.
welford_reduce(tmp2, tmp3_mean, tmp3_m2, tmp3_weight, roffset == 0)
)
tmp3_mean = tl.where(rmask & xmask, tmp3_mean_next, tmp3_mean)
tmp3_m2 = tl.where(rmask & xmask, tmp3_m2_next, tmp3_m2)
tmp3_weight = tl.where(rmask & xmask, tmp3_weight_next, tmp3_weight)
tmp3_tmp, tmp4_tmp, tmp5_tmp = triton_helpers.welford(tmp3_mean,
tmp3_m2, tmp3_weight, 1)
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5_tmp[:, None]
tl.store(out_ptr1 + x0, tmp3, xmask)
tmp15 = tl.load(in_ptr2 + x0 % 16, xmask, eviction_policy='evict_last')
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp6 = tl.load(in_ptr1 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp7 = tmp6 - tmp3
tmp8 = 4096.0
tmp9 = tmp4 / tmp8
tmp10 = 1e-05
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp7 * tmp12
tmp14 = tmp13 * tmp0
tmp16 = tmp14 + tmp15
tmp17 = 0.0
tmp18 = tmp16 > tmp17
tmp19 = 0.2
tmp20 = tmp16 * tmp19
tmp21 = tl.where(tmp18, tmp16, tmp20)
tl.store(in_out_ptr0 + (r1 + 4096 * x0), tmp21, rmask & xmask)
tmp22 = 4096.0
tmp23 = tmp4 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tl.store(out_ptr3 + x0, tmp26, xmask)
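# The reduction kernel above streams over 4096 elements per row and
# accumulates mean/variance with Welford's algorithm via
# triton_helpers.welford_reduce / welford. A hedged pure-Python
# reference of the per-element update, for clarity:
def welford_update(count, mean, m2, new_value):
    # After all values are processed, mean is the running mean and
    # m2 / count is the biased variance, matching tmp4 / 4096.0 above.
    count += 1
    delta = new_value - mean
    mean += delta / count
    m2 += delta * (new_value - mean)
    return count, mean, m2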
@triton.jit
def triton_poi_fused_sigmoid_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.sigmoid(tmp0)
tl.store(in_out_ptr0 + x0, tmp1, None)
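# Hedged usage sketch (not generated code) for the in-place pointwise
# kernel above. The tensor name is an illustrative assumption; because
# the kernel is unmasked, numel must be a multiple of XBLOCK, and
# `grid` is the inductor helper imported at the top of this file.
def apply_sigmoid_inplace(t):
    # t: contiguous float32 CUDA tensor, overwritten with sigmoid(t)
    triton_poi_fused_sigmoid_6[grid(t.numel())](t, t.numel(),
        XBLOCK=512, num_warps=4, num_stages=1)
    return t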
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26) = args
args.clear()
assert_size_stride(primals_1, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_2, (4, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64,), (1,))
assert_size_stride(primals_5, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_6, (128,), (1,))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64,), (1,))
assert_size_stride(primals_11, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_12, (64,), (1,))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32,), (1,))
assert_size_stride(primals_17, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_18, (32,), (1,))
assert_size_stride(primals_19, (32,), (1,))
assert_size_stride(primals_20, (32, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (32,), (1,))
assert_size_stride(primals_23, (32, 16, 4, 4), (256, 16, 4, 1))
assert_size_stride(primals_24, (16,), (1,))
assert_size_stride(primals_25, (16,), (1,))
assert_size_stride(primals_26, (4, 16, 1, 1), (16, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = empty_strided_cuda((256,), (1,), torch.float32)
buf2 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch
.float32)
buf6 = empty_strided_cuda((1, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
buf7 = reinterpret_tensor(buf6, (4, 64, 4, 4), (1024, 16, 4, 1), 0)
del buf6
buf5 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch
.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_0[grid(256)
](buf7, primals_3, buf0, primals_4, buf1, buf2, buf5, 256, 16,
XBLOCK=32, num_warps=4, num_stages=1)
del primals_3
del primals_4
buf8 = extern_kernels.convolution(buf7, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 128, 4, 4), (2048, 16, 4, 1))
buf9 = empty_strided_cuda((512,), (1,), torch.float32)
buf10 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf14 = empty_strided_cuda((1, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
buf15 = reinterpret_tensor(buf14, (4, 128, 4, 4), (2048, 16, 4, 1), 0)
del buf14
buf13 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_1[grid(512)
](buf15, primals_6, buf8, primals_7, buf9, buf10, buf13, 512,
16, XBLOCK=32, num_warps=4, num_stages=1)
del primals_6
del primals_7
buf16 = extern_kernels.convolution(buf15, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 64, 8, 8), (4096, 64, 8, 1))
buf17 = empty_strided_cuda((256,), (1,), torch.float32)
buf18 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf22 = empty_strided_cuda((1, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf23 = reinterpret_tensor(buf22, (4, 64, 8, 8), (4096, 64, 8, 1), 0)
del buf22
buf21 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2[grid(256)
](buf23, primals_9, buf16, primals_10, buf17, buf18, buf21, 256,
64, XBLOCK=8, num_warps=4, num_stages=1)
del primals_10
del primals_9
buf24 = extern_kernels.convolution(buf23, primals_11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 64, 8, 8), (4096, 64, 8, 1))
buf25 = empty_strided_cuda((256,), (1,), torch.float32)
buf26 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf30 = empty_strided_cuda((1, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
buf31 = reinterpret_tensor(buf30, (4, 64, 8, 8), (4096, 64, 8, 1), 0)
del buf30
buf29 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_2[grid(256)
](buf31, primals_12, buf24, primals_13, buf25, buf26, buf29,
256, 64, XBLOCK=8, num_warps=4, num_stages=1)
del primals_12
del primals_13
buf32 = extern_kernels.convolution(buf31, primals_14, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 32, 16, 16), (8192, 256, 16, 1))
buf33 = empty_strided_cuda((128,), (1,), torch.float32)
buf34 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf38 = empty_strided_cuda((1, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf39 = reinterpret_tensor(buf38, (4, 32, 16, 16), (8192, 256, 16,
1), 0)
del buf38
buf37 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3[grid(128)
](buf39, primals_15, buf32, primals_16, buf33, buf34, buf37,
128, 256, num_warps=2, num_stages=1)
del primals_15
del primals_16
buf40 = extern_kernels.convolution(buf39, primals_17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 32, 16, 16), (8192, 256, 16, 1))
buf41 = empty_strided_cuda((128,), (1,), torch.float32)
buf42 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf46 = empty_strided_cuda((1, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
buf47 = reinterpret_tensor(buf46, (4, 32, 16, 16), (8192, 256, 16,
1), 0)
del buf46
buf45 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_3[grid(128)
](buf47, primals_18, buf40, primals_19, buf41, buf42, buf45,
128, 256, num_warps=2, num_stages=1)
del primals_18
del primals_19
buf48 = extern_kernels.convolution(buf47, primals_20, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf49 = empty_strided_cuda((128,), (1,), torch.float32)
buf50 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf54 = empty_strided_cuda((1, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
buf55 = reinterpret_tensor(buf54, (4, 32, 32, 32), (32768, 1024, 32,
1), 0)
del buf54
buf53 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
triton_per_fused__native_batch_norm_legit_leaky_relu_repeat_4[grid(128)
](buf55, primals_21, buf48, primals_22, buf49, buf50, buf53,
128, 1024, num_warps=8, num_stages=1)
del primals_21
del primals_22
buf56 = extern_kernels.convolution(buf55, primals_23, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 16, 64, 64), (65536, 4096, 64, 1))
buf57 = empty_strided_cuda((64,), (1,), torch.float32)
buf58 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
buf62 = empty_strided_cuda((1, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf63 = reinterpret_tensor(buf62, (4, 16, 64, 64), (65536, 4096, 64,
1), 0)
del buf62
buf61 = empty_strided_cuda((1, 64, 1, 1), (64, 1, 64, 64), torch.
float32)
triton_red_fused__native_batch_norm_legit_leaky_relu_repeat_5[grid(64)
](buf63, primals_24, buf56, primals_25, buf57, buf58, buf61, 64,
4096, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1)
del primals_24
del primals_25
buf64 = extern_kernels.convolution(buf63, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf65 = buf64
del buf64
triton_poi_fused_sigmoid_6[grid(65536)](buf65, 65536, XBLOCK=512,
num_warps=4, num_stages=1)
return (buf65, primals_1, primals_2, primals_5, primals_8, primals_11,
primals_14, primals_17, primals_20, primals_23, primals_26, buf0,
buf1, reinterpret_tensor(buf5, (256,), (1,), 0), buf7, buf8, buf9,
reinterpret_tensor(buf13, (512,), (1,), 0), buf15, buf16, buf17,
reinterpret_tensor(buf21, (256,), (1,), 0), buf23, buf24, buf25,
reinterpret_tensor(buf29, (256,), (1,), 0), buf31, buf32, buf33,
reinterpret_tensor(buf37, (128,), (1,), 0), buf39, buf40, buf41,
reinterpret_tensor(buf45, (128,), (1,), 0), buf47, buf48, buf49,
reinterpret_tensor(buf53, (128,), (1,), 0), buf55, buf56, buf57,
reinterpret_tensor(buf61, (64,), (1,), 0), buf63, buf65,
reinterpret_tensor(buf58, (1, 64, 1, 1), (64, 1, 1, 1), 0),
reinterpret_tensor(buf50, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf42, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf34, (1, 128, 1, 1), (128, 1, 1, 1), 0),
reinterpret_tensor(buf26, (1, 256, 1, 1), (256, 1, 1, 1), 0),
reinterpret_tensor(buf18, (1, 256, 1, 1), (256, 1, 1, 1), 0),
reinterpret_tensor(buf10, (1, 512, 1, 1), (512, 1, 1, 1), 0),
reinterpret_tensor(buf2, (1, 256, 1, 1), (256, 1, 1, 1), 0))
class INConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, ins_n=True, bias=False):
super(INConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
class INDeConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, out_padding=0, dilation=1, groups=1, relu=True, ins_n=
True, bias=False):
super(INDeConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, output_padding=
out_padding, dilation=dilation, groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
class DecoderNew(nn.Module):
def __init__(self, img_channel):
super(DecoderNew, self).__init__()
self.deconv1 = INDeConv(in_planes=32, out_planes=64, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation1 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv2 = INDeConv(in_planes=64, out_planes=128, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation2 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv3 = INDeConv(in_planes=128, out_planes=64, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation3 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv4 = INDeConv(in_planes=64, out_planes=64, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation4 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv5 = INDeConv(in_planes=64, out_planes=32, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation5 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv6 = INDeConv(in_planes=32, out_planes=32, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation6 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv7 = INDeConv(in_planes=32, out_planes=32, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation7 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv8 = INDeConv(in_planes=32, out_planes=16, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation8 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.output = INConv(in_planes=16, out_planes=img_channel,
kernel_size=1, stride=1, padding=0, ins_n=False, relu=False)
self.activation9 = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.deconv1.conv.weight
primals_3 = self.deconv1.ins_n.weight
primals_4 = self.deconv1.ins_n.bias
primals_5 = self.deconv2.conv.weight
primals_6 = self.deconv2.ins_n.weight
primals_7 = self.deconv2.ins_n.bias
primals_8 = self.deconv3.conv.weight
primals_9 = self.deconv3.ins_n.weight
primals_10 = self.deconv3.ins_n.bias
primals_11 = self.deconv4.conv.weight
primals_12 = self.deconv4.ins_n.weight
primals_13 = self.deconv4.ins_n.bias
primals_14 = self.deconv5.conv.weight
primals_15 = self.deconv5.ins_n.weight
primals_16 = self.deconv5.ins_n.bias
primals_17 = self.deconv6.conv.weight
primals_18 = self.deconv6.ins_n.weight
primals_19 = self.deconv6.ins_n.bias
primals_20 = self.deconv7.conv.weight
primals_21 = self.deconv7.ins_n.weight
primals_22 = self.deconv7.ins_n.bias
primals_23 = self.deconv8.conv.weight
primals_24 = self.deconv8.ins_n.weight
primals_25 = self.deconv8.ins_n.bias
primals_26 = self.output.conv.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26])
return output[0]
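# Hedged usage sketch (illustrative, assumes a CUDA device): the
# compiled decoder maps a (4, 32, 4, 4) latent to a
# (4, img_channel, 64, 64) sigmoid-activated image batch.
def run_decoder_example():
    model = DecoderNew(img_channel=4).cuda()
    latent = torch.rand(4, 32, 4, 4, device='cuda')
    return model(latent)  # shape (4, 4, 64, 64), values in (0, 1)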
| samsgood0310/Unsupervised-Defect-Segmentation | Decoder | false | 7,615 | [
"Apache-2.0"
] | 1 | 66af32506cd6e60c356890616e28d679622fd8e6 | https://github.com/samsgood0310/Unsupervised-Defect-Segmentation/tree/66af32506cd6e60c356890616e28d679622fd8e6 | import torch
import torch.nn as nn
class INConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, dilation=1, groups=1, relu=True, ins_n=True, bias=False):
super().__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
class INDeConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1,
padding=0, out_padding=0, dilation=1, groups=1, relu=True, ins_n=
True, bias=False):
super().__init__()
self.out_channels = out_planes
self.conv = nn.ConvTranspose2d(in_planes, out_planes, kernel_size=
kernel_size, stride=stride, padding=padding, output_padding=
out_padding, dilation=dilation, groups=groups, bias=bias)
self.ins_n = nn.InstanceNorm2d(out_planes, affine=True
) if ins_n else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.ins_n is not None:
x = self.ins_n(x)
if self.relu is not None:
x = self.relu(x)
return x
class Model(nn.Module):
def __init__(self, img_channel):
super().__init__()
self.deconv1 = INDeConv(in_planes=32, out_planes=64, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation1 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv2 = INDeConv(in_planes=64, out_planes=128, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation2 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv3 = INDeConv(in_planes=128, out_planes=64, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation3 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv4 = INDeConv(in_planes=64, out_planes=64, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation4 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv5 = INDeConv(in_planes=64, out_planes=32, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation5 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv6 = INDeConv(in_planes=32, out_planes=32, kernel_size=3,
stride=1, padding=1, relu=False)
self.activation6 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv7 = INDeConv(in_planes=32, out_planes=32, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation7 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.deconv8 = INDeConv(in_planes=32, out_planes=16, kernel_size=4,
stride=2, padding=1, relu=False)
self.activation8 = nn.LeakyReLU(inplace=True, negative_slope=0.2)
self.output = INConv(in_planes=16, out_planes=img_channel,
kernel_size=1, stride=1, padding=0, ins_n=False, relu=False)
self.activation9 = nn.Sigmoid()
def forward(self, x):
x = self.activation1(self.deconv1(x))
x = self.activation2(self.deconv2(x))
x = self.activation3(self.deconv3(x))
x = self.activation4(self.deconv4(x))
x = self.activation5(self.deconv5(x))
x = self.activation6(self.deconv6(x))
x = self.activation7(self.deconv7(x))
x = self.activation8(self.deconv8(x))
x = self.activation9(self.output(x))
return x
def get_inputs():
return [torch.rand([4, 32, 4, 4])]
def
# ... truncated (>4000 chars) for memory efficiency |
FrameAvgPool | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/fk/cfkcunh3plyysuvib63zgkougyqv2ia22pa4qcifvxy3tij7w7nx.py
# Topologically Sorted Source Nodes: [vis_h], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# vis_h => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l5/cl5sxfxzphslwufowywi3gzknqkh7vzlbg5ksmlhwebbayh7gahv.py
# Topologically Sorted Source Nodes: [vis_h_1], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# vis_h_1 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze_1, [1, 4], [1, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [vis_h], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 16, grid=grid(16), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [vis_h_1], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf1, buf2, 4, grid=grid(4), stream=stream0)
return (reinterpret_tensor(buf2, (4, 1), (1, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class FrameAvgPool(nn.Module):
def __init__(self, cfg):
super(FrameAvgPool, self).__init__()
input_size = cfg.INPUT_SIZE
hidden_size = cfg.HIDDEN_SIZE
kernel_size = cfg.KERNEL_SIZE
stride = cfg.STRIDE
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.avg_pool = nn.AvgPool1d(kernel_size, stride)
def forward(self, visual_input):
vis_h = torch.relu(self.vis_conv(visual_input))
vis_h = self.avg_pool(vis_h)
return vis_h
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(INPUT_SIZE=4, HIDDEN_SIZE=4,
KERNEL_SIZE=4, STRIDE=4)}]
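# Hedged usage sketch: with the mock config above (kernel/stride 4), a
# (4, 4) input -- treated by Conv1d as an unbatched (channels=4,
# length=4) sequence -- is projected per frame, ReLU-ed, and
# average-pooled down to a single frame per channel.
def frame_avg_pool_example():
    cfg = _mock_config(INPUT_SIZE=4, HIDDEN_SIZE=4, KERNEL_SIZE=4,
        STRIDE=4)
    pool = FrameAvgPool(cfg)
    return pool(torch.rand(4, 4))  # shape (4, 1)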
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
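# Note on the kernel above: the 0.25 multiplier hard-codes
# 1 / kernel_size for the fused AvgPool1d(4, 4); each output element
# is the mean of four consecutive inputs of one row.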
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 4), (16, 4, 1))
buf1 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16)](buf1,
primals_2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_avg_pool2d_1[grid(4)](buf1, buf2, 4, XBLOCK=4,
num_warps=1, num_stages=1)
return reinterpret_tensor(buf2, (4, 1), (1, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf1, (4, 1, 4), (4, 4, 1), 0), buf3
class FrameAvgPoolNew(nn.Module):
def __init__(self, cfg):
super(FrameAvgPoolNew, self).__init__()
input_size = cfg.INPUT_SIZE
hidden_size = cfg.HIDDEN_SIZE
kernel_size = cfg.KERNEL_SIZE
stride = cfg.STRIDE
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.avg_pool = nn.AvgPool1d(kernel_size, stride)
def forward(self, input_0):
primals_1 = self.vis_conv.weight
primals_2 = self.vis_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| CFM-MSG/Code_LEORN | FrameAvgPool | false | 7,616 | [
"MIT"
] | 1 | fabea1e1ded973a4db692e51e2df442bde55f626 | https://github.com/CFM-MSG/Code_LEORN/tree/fabea1e1ded973a4db692e51e2df442bde55f626 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, cfg):
super().__init__()
input_size = cfg.INPUT_SIZE
hidden_size = cfg.HIDDEN_SIZE
kernel_size = cfg.KERNEL_SIZE
stride = cfg.STRIDE
self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1)
self.avg_pool = nn.AvgPool1d(kernel_size, stride)
def forward(self, visual_input):
vis_h = torch.relu(self.vis_conv(visual_input))
vis_h = self.avg_pool(vis_h)
return vis_h
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(INPUT_SIZE=4, HIDDEN_SIZE=4,
KERNEL_SIZE=4, STRIDE=4)}]
|
BertOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/ss/cssn3ayzwsxbizosd6ieezxafjef3fxscx57lbnlxbdiuph3p2je.py
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# u => mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (2))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/l6/cl6vibrzoyykzmbhmvlsdaksh3k2diif7eg66z2ho46tjsy6emma.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
# %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/4p/c4pnuv3rymhg72qutbvx7mkzv6t7edcefa73bt3nl66b4qtouu4a.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# hidden_states_2 => add_2
# mul => mul
# pow_1 => pow_1
# s => mean_1
# sqrt => sqrt
# x => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del primals_6
return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.checkpoint
class BertLayerNorm(nn.Module):
"""LayerNorm层, 见Transformer(一), 讲编码器(encoder)的第3部分"""
def __init__(self, hidden_size, eps=1e-12, conditional=False):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
self.conditional = conditional
if conditional is True:
self.weight_dense = nn.Linear(2 * hidden_size, hidden_size,
bias=False)
self.weight_dense.weight.data.uniform_(0, 0)
self.bias_dense = nn.Linear(2 * hidden_size, hidden_size, bias=
False)
self.bias_dense.weight.data.uniform_(0, 0)
def forward(self, x):
if self.conditional is False:
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
else:
inputs = x[0]
cond = x[1]
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(dim=1)
weight = self.weight + self.weight_dense(cond)
bias = self.bias + self.bias_dense(cond)
u = inputs.mean(-1, keepdim=True)
s = (inputs - u).pow(2).mean(-1, keepdim=True)
x = (inputs - u) / torch.sqrt(s + self.variance_epsilon)
return weight * x + bias
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
layer_norm_eps=1, hidden_dropout_prob=0.5)}]
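# Hedged numerical sketch: with conditional=False, BertLayerNorm
# matches nn.LayerNorm (biased variance, epsilon inside the square
# root, weight=1 / bias=0 at init). Shapes and eps are illustrative.
def layernorm_equivalence_check(hidden_size=4, eps=1e-12):
    x = torch.rand(2, 3, hidden_size)
    ours = BertLayerNorm(hidden_size, eps=eps)
    ref = nn.LayerNorm(hidden_size, eps=eps)
    return torch.allclose(ours(x), ref(x), atol=1e-06)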
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + 1)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr1 + 2)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr1 + 3)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp9 = tmp6 + tmp8
tmp11 = tmp9 + tmp10
tmp12 = tmp5 + tmp11
tmp16 = tmp13 + tmp15
tmp18 = tmp16 + tmp17
tmp19 = tmp12 + tmp18
tmp23 = tmp20 + tmp22
tmp25 = tmp23 + tmp24
tmp26 = tmp19 + tmp25
tmp27 = 4.0
tmp28 = tmp26 / tmp27
tl.store(out_ptr0 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = libdevice.sqrt(tmp16)
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tmp21 = tmp19 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_2
del primals_4
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_6
return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2
class BertLayerNorm(nn.Module):
"""LayerNorm层, 见Transformer(一), 讲编码器(encoder)的第3部分"""
def __init__(self, hidden_size, eps=1e-12, conditional=False):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
self.conditional = conditional
if conditional is True:
self.weight_dense = nn.Linear(2 * hidden_size, hidden_size,
bias=False)
self.weight_dense.weight.data.uniform_(0, 0)
self.bias_dense = nn.Linear(2 * hidden_size, hidden_size, bias=
False)
self.bias_dense.weight.data.uniform_(0, 0)
def forward(self, x):
if self.conditional is False:
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
else:
inputs = x[0]
cond = x[1]
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(dim=1)
weight = self.weight + self.weight_dense(cond)
bias = self.bias + self.bias_dense(cond)
u = inputs.mean(-1, keepdim=True)
s = (inputs - u).pow(2).mean(-1, keepdim=True)
x = (inputs - u) / torch.sqrt(s + self.variance_epsilon)
return weight * x + bias
class BertOutputNew(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_0, input_1):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_5 = self.LayerNorm.weight
primals_6 = self.LayerNorm.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
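# Hedged usage sketch (assumes a CUDA device): the compiled module
# takes the intermediate activations plus the residual input,
# mirroring the eager forward (dense -> residual add -> LayerNorm).
# The traced graph above contains no dropout op, consistent with
# inference-time tracing; the config argument mirrors get_init_inputs.
def run_bert_output_example(config):
    m = BertOutputNew(config).cuda()
    h = torch.rand(4, 4, 4, 4, device='cuda')
    residual = torch.rand(4, 4, 4, 4, device='cuda')
    return m(h, residual)  # shape (4, 4, 4, 4)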
| Elvisambition/bert_seq2seq | BertOutput | false | 7,617 | [
"Apache-2.0"
] | 1 | 643ac537c16872f0d13200de06001d8201a54fbb | https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.checkpoint
class BertLayerNorm(nn.Module):
"""LayerNorm层, 见Transformer(一), 讲编码器(encoder)的第3部分"""
def __init__(self, hidden_size, eps=1e-12, conditional=False):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
self.conditional = conditional
if conditional is True:
self.weight_dense = nn.Linear(2 * hidden_size, hidden_size,
bias=False)
self.weight_dense.weight.data.uniform_(0, 0)
self.bias_dense = nn.Linear(2 * hidden_size, hidden_size, bias=
False)
self.bias_dense.weight.data.uniform_(0, 0)
def forward(self, x):
if self.conditional is False:
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
else:
inputs = x[0]
cond = x[1]
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(dim=1)
weight = self.weight + self.weight_dense(cond)
bias = self.bias + self.bias_dense(cond)
u = inputs.mean(-1, keepdim=True)
s = (inputs - u).pow(2).mean(-1, keepdim=True)
x = (inputs - u) / torch.sqrt(s + self.variance_epsilon)
return weight * x + bias
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.
layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(intermediate_size=4, hidden_size=4,
layer_norm_eps=1, hidden_dropout_prob=0.5)}]
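# Harness sketch (an assumption about how these helpers are consumed; the
# benchmark driver itself is not part of this record):
#
#   init_args, init_kwargs = get_init_inputs()
#   model = Model(*init_args, **init_kwargs)
#   out = model(*get_inputs())   # two (4, 4, 4, 4) tensors in, one out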
|
EncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
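# Note: triton_poi_fused_clone_0 fuses the linear bias add with the
# permute-and-copy that lays Q/K/V out per attention head: in_ptr0 is a
# (16, 4) matmul result, in_ptr1 the 4-element bias, and swapping the
# roles of the x/y indices between load and store materializes the
# transposed view contiguously for the following bmm.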
# kernel path: runs/run_shard_4/inductor_cache/bs/cbsluabtq7ll426nybkislhh3cajm6f7ggrxam362hohynwnvtk6.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_8, 0), kwargs = {})
triton_poi_fused_eq_1 = async_compile.triton('triton_poi_fused_eq_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/cn/ccngzawdtctw47u5nulr7jdzgrltex6baih5mlgcfzgpyl7q3lti.py
# Topologically Sorted Source Nodes: [energy, energy_1, attention], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attention => amax, exp, sub, sum_1
# energy => div
# energy_1 => full_default, where
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 2.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x2), tmp20, xmask)
tl.store(out_ptr1 + (x2), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wo/cwomfv727rkjpxhkbg45crim7a2bkrverw53z3xwuf2c6uswvcp2.py
# Topologically Sorted Source Nodes: [energy, energy_1, attention], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attention => amax, div_1, exp, sub
# energy => div
# energy_1 => full_default, where
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 2.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_3 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x4), xmask)
tmp6 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x5), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x4), tmp10, xmask)
''', device_str='cuda')
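# Note on the two kernels above: together they form a numerically stable
# masked softmax over the last dimension. _2 walks the four elements of
# each row once to emit the row max (out_ptr0) and the sum of
# exp(x - max) (out_ptr1); _3 then rewrites every element in place as
# exp(x - max) / sum. Masked positions are first replaced with a large
# negative constant (~ -1e18), mirroring
# energy.masked_fill(mask == 0, -1e18) in the eager model, so they
# contribute essentially zero probability mass.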
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6m/c6mhj5zwirfhy5e4o45uaeov72uwfby4udubpm2fcz42iqvs2g57.py
# Topologically Sorted Source Nodes: [add, src], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# src => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_17), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iz/cizh7p23zwsiqbrt6dvrlvjzpyujwvyyaolptfk5xtby6foymiaz.py
# Topologically Sorted Source Nodes: [add, src], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# src => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %view_17), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_11), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_12), kwargs = {})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/u4/cu4mvhweewrefdurxuza5qfbqlwomkc67kmxkkaurh6luaf2e2fz.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_19,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_7 = async_compile.triton('triton_poi_fused_relu_threshold_backward_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
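# Note: this kernel fuses the feed-forward bias add with ReLU and, as a
# by-product, stores the boolean mask (activation <= 0) that the
# threshold_backward node of the autograd graph will use to zero
# gradients on the backward pass.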
# kernel path: runs/run_shard_4/inductor_cache/he/chevf4d6tadiz3y2a2abr2lj2bvo3wyfykoivwj2s4xedp3vdjuf.py
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_1 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_21), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hn/chnyp4bqchi6cc3qkpikodtjzt7sfs4gz3r2kunqaesb7ahrywso.py
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_1 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
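# Note: with hidden size 4 the per-row statistics are a fully unrolled
# reduction: the four loads are summed for the mean and the squared
# deviations for the biased (correction=0) variance; unlike kernel _5,
# this variant also folds eps and rsqrt in, so kernel _10 only has to
# apply (x - mean) * rsqrt * weight + bias.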
# kernel path: runs/run_shard_4/inductor_cache/al/cal3txxjlyumb2wxf6pzsp7g5yvv5ygiluv6ygjjzldvb2woph4t.py
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src_1 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_2, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_17), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_18), kwargs = {})
triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, ), (1, ))
assert_size_stride(primals_18, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_1.run(primals_8, buf6, 64, grid=grid(64), stream=stream0)
del primals_8
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [energy, energy_1, attention], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf5, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [energy, energy_1, attention], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_3.run(buf9, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, src], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(primals_1, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, src], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf13, buf14, buf15, primals_11, primals_12, buf16, 64, grid=grid(64), stream=stream0)
del primals_12
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0); del buf17 # reuse
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_7.run(buf18, primals_14, buf24, 64, grid=grid(64), stream=stream0)
del primals_14
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf20, buf16, primals_16, 64, grid=grid(64), stream=stream0)
del primals_16
buf21 = buf15; del buf15 # reuse
buf22 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf20, buf21, buf22, 16, grid=grid(16), stream=stream0)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf20, buf21, buf22, primals_17, primals_18, buf23, 64, grid=grid(64), stream=stream0)
del buf21
del buf22
del primals_18
return (buf23, primals_1, primals_11, primals_17, buf6, buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(buf18, (16, 4), (4, 1), 0), buf20, primals_15, buf24, primals_13, primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
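    # Note: besides the layer output (buf23), call() returns the original
    # input, the norm weights, the attention mask and probabilities, and
    # several reinterpreted intermediates -- the values the generated
    # backward graph expects to have saved from the forward pass.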
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, d_model, n_heads, dropout):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.scale = math.sqrt(d_model)
def forward(self, query, key, value, mask=None):
batch_size = query.shape[0]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K_t = K.permute(0, 1, 3, 2)
energy = torch.matmul(Q, K_t) / self.scale
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e+18)
attention = torch.softmax(energy, dim=-1)
x = torch.matmul(self.dropout(attention), V)
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(batch_size, -1, self.d_model)
x = self.fc_o(x)
return x, attention
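# Shape sketch for the default config (d_model=4, n_heads=4, head_dim=1):
# Q/K/V go (batch, seq, 4) -> (batch, 4, seq, 1), energy is
# (batch, 4, seq, seq), and a (batch, seq, seq) mask broadcasts across
# the head dimension. Note the scale is sqrt(d_model) rather than the
# more common sqrt(head_dim); the compiled kernels above bake in the
# matching constant 0.5 = 1 / sqrt(4).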
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, d_model, pf_dim, dropout):
super().__init__()
self.fc_1 = nn.Linear(d_model, pf_dim)
self.fc_2 = nn.Linear(pf_dim, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(torch.relu(self.fc_1(x)))
x = self.fc_2(x)
return x
class EncoderLayer(nn.Module):
def __init__(self, d_model, n_heads, dff, dropout):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(d_model)
self.ff_layer_norm = nn.LayerNorm(d_model)
self.self_attention = MultiHeadAttentionLayer(d_model, n_heads, dropout
)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(d_model,
dff, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src, src_mask):
_src, _ = self.self_attention(src, src, src, src_mask)
src = self.self_attn_layer_norm(src + self.dropout(_src))
_src = self.positionwise_feedforward(src)
src = self.ff_layer_norm(src + self.dropout(_src))
return src
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_heads': 4, 'dff': 4, 'dropout': 0.5}]
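# Minimal CPU sanity-check sketch (illustrative only; independent of the
# compiled CUDA path above):
#
#   layer = EncoderLayer(**get_init_inputs()[1])
#   layer.eval()                 # disable dropout for a deterministic pass
#   src, src_mask = get_inputs()
#   out = layer(src, src_mask)   # same (4, 4, 4) shape as src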
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x2, tmp20, xmask)
tl.store(out_ptr1 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_3(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex % 64
x4 = xindex
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_out_ptr0 + x4, xmask)
tmp6 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x4, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_7(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17, primals_18
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4,), (1,))
assert_size_stride(primals_18, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_eq_1[grid(64)](primals_8, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_8
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf5,
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused__softmax_div_masked_fill_3[grid(256)](buf9, buf6,
buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_10, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf15 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_native_layer_norm_5[grid(16)](primals_1, buf13,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_6[grid(64)](primals_1, buf13,
buf14, buf15, primals_11, primals_12, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4), (16, 4, 1), 0)
del buf17
buf24 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_7[grid(64)](buf18,
primals_14, buf24, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
triton_poi_fused_add_8[grid(64)](buf20, buf16, primals_16, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf21 = buf15
del buf15
buf22 = buf14
del buf14
triton_poi_fused_native_layer_norm_9[grid(16)](buf20, buf21, buf22,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf20, buf21, buf22,
primals_17, primals_18, buf23, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf21
del buf22
del primals_18
return (buf23, primals_1, primals_11, primals_17, buf6, buf9,
reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13,
reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(
buf18, (16, 4), (4, 1), 0), buf20, primals_15, buf24, primals_13,
primals_9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, d_model, n_heads, dropout):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.scale = math.sqrt(d_model)
def forward(self, query, key, value, mask=None):
batch_size = query.shape[0]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K_t = K.permute(0, 1, 3, 2)
energy = torch.matmul(Q, K_t) / self.scale
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e+18)
attention = torch.softmax(energy, dim=-1)
x = torch.matmul(self.dropout(attention), V)
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(batch_size, -1, self.d_model)
x = self.fc_o(x)
return x, attention
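# Hedged usage sketch (added; not part of the generated module): exercises the
# layer above with the (4, 4, 4) shapes asserted in call(). dropout=0.0 keeps
# the forward deterministic; the helper name is hypothetical.
def _mha_usage_sketch():
    import torch
    mha = MultiHeadAttentionLayer(d_model=4, n_heads=4, dropout=0.0)
    src = torch.rand(4, 4, 4)             # (batch, seq, d_model)
    mask = torch.ones(4, 1, 1, 4)         # 1 = attend, 0 = masked out
    out, attn = mha(src, src, src, mask)  # out: (4, 4, 4), attn: (4, 4, 4, 4)
    return out, attn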
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, d_model, pf_dim, dropout):
super().__init__()
self.fc_1 = nn.Linear(d_model, pf_dim)
self.fc_2 = nn.Linear(pf_dim, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(torch.relu(self.fc_1(x)))
x = self.fc_2(x)
return x
class EncoderLayerNew(nn.Module):
def __init__(self, d_model, n_heads, dff, dropout):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(d_model)
self.ff_layer_norm = nn.LayerNorm(d_model)
self.self_attention = MultiHeadAttentionLayer(d_model, n_heads, dropout
)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(d_model,
dff, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, input_0, input_1):
# Parameter-to-primal mapping follows the order consumed in call() above:
# primals_3/5/7 are the q/k/v biases, primals_11/12 the first layer norm,
# and primals_17/18 the feed-forward layer norm.
primals_2 = self.self_attention.fc_q.weight
primals_3 = self.self_attention.fc_q.bias
primals_4 = self.self_attention.fc_k.weight
primals_5 = self.self_attention.fc_k.bias
primals_6 = self.self_attention.fc_v.weight
primals_7 = self.self_attention.fc_v.bias
primals_9 = self.self_attention.fc_o.weight
primals_10 = self.self_attention.fc_o.bias
primals_11 = self.self_attn_layer_norm.weight
primals_12 = self.self_attn_layer_norm.bias
primals_13 = self.positionwise_feedforward.fc_1.weight
primals_14 = self.positionwise_feedforward.fc_1.bias
primals_15 = self.positionwise_feedforward.fc_2.weight
primals_16 = self.positionwise_feedforward.fc_2.bias
primals_17 = self.ff_layer_norm.weight
primals_18 = self.ff_layer_norm.bias
primals_1 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18])
return output[0]
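# Hedged usage sketch (added; not generated by the compiler): runs the
# Triton-backed layer with the shapes asserted in call(). Requires a CUDA
# device, since the kernels above are compiled for 'cuda'.
def _encoder_layer_usage_sketch():
    import torch
    layer = EncoderLayerNew(d_model=4, n_heads=4, dff=4, dropout=0.0).cuda()
    src = torch.rand(4, 4, 4, device='cuda')
    src_mask = torch.ones(4, 4, 4, device='cuda')
    return layer(src, src_mask)  # same shape as src: (4, 4, 4)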
| salvacarrion/nmt-continual-learning | EncoderLayer | false | 7,618 | [
"MIT"
] | 1 | 302147ac9c270f3341a68a72c803c457f05ff37b | https://github.com/salvacarrion/nmt-continual-learning/tree/302147ac9c270f3341a68a72c803c457f05ff37b | import math
import torch
import torch.nn as nn
class MultiHeadAttentionLayer(nn.Module):
def __init__(self, d_model, n_heads, dropout):
super().__init__()
assert d_model % n_heads == 0
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.fc_q = nn.Linear(d_model, d_model)
self.fc_k = nn.Linear(d_model, d_model)
self.fc_v = nn.Linear(d_model, d_model)
self.fc_o = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.scale = math.sqrt(d_model)
def forward(self, query, key, value, mask=None):
batch_size = query.shape[0]
Q = self.fc_q(query)
K = self.fc_k(key)
V = self.fc_v(value)
Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0,
2, 1, 3)
K_t = K.permute(0, 1, 3, 2)
energy = torch.matmul(Q, K_t) / self.scale
if mask is not None:
energy = energy.masked_fill(mask == 0, -1e+18)
attention = torch.softmax(energy, dim=-1)
x = torch.matmul(self.dropout(attention), V)
x = x.permute(0, 2, 1, 3).contiguous()
x = x.view(batch_size, -1, self.d_model)
x = self.fc_o(x)
return x, attention
class PositionwiseFeedforwardLayer(nn.Module):
def __init__(self, d_model, pf_dim, dropout):
super().__init__()
self.fc_1 = nn.Linear(d_model, pf_dim)
self.fc_2 = nn.Linear(pf_dim, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.dropout(torch.relu(self.fc_1(x)))
x = self.fc_2(x)
return x
class Model(nn.Module):
def __init__(self, d_model, n_heads, dff, dropout):
super().__init__()
self.self_attn_layer_norm = nn.LayerNorm(d_model)
self.ff_layer_norm = nn.LayerNorm(d_model)
self.self_attention = MultiHeadAttentionLayer(d_model, n_heads, dropout
)
self.positionwise_feedforward = PositionwiseFeedforwardLayer(d_model,
dff, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self, src, src_mask):
_src, _ = self.self_attention(src, src, src, src_mask)
src = self.self_attn_layer_norm(src + self.dropout(_src))
_src = self.positionwise_feedforward(src)
src = self.ff_layer_norm(src + self.dropout(_src))
return src
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
|
T5DenseReluDense | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/3v/c3v7n6hzyrv5pn6uojl3hf6tko347a672spakigdzmqm7ebd4zwl.py
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# hidden_states_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [hidden_states_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, buf3, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [hidden_states_3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.checkpoint
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = F.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(d_model=4, d_ff=4, dropout_rate=0.5)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.checkpoint
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = 0.0
tmp4 = tmp2 <= tmp3
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf2)
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_3, buf3
class T5DenseReluDenseNew(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, input_0):
primals_1 = self.wi.weight
primals_3 = self.wo.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Elvisambition/bert_seq2seq | T5DenseReluDense | false | 7,619 | [
"Apache-2.0"
] | 1 | 643ac537c16872f0d13200de06001d8201a54fbb | https://github.com/Elvisambition/bert_seq2seq/tree/643ac537c16872f0d13200de06001d8201a54fbb | from _paritybench_helpers import _mock_config
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.checkpoint
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = F.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [_mock_config(d_model=4, d_ff=4, dropout_rate=0.5)]
|
AdapterLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/6s/c6shmuvjmq6zc4ifvdsynorwri47ra63qxa7jg3e7p6lw6xlqj5q.py
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, net_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
# Source node to ATen node mapping:
# add => add
# erf => erf
# mul => mul
# net_1 => mul_1
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, 1.4142135623730951), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%div,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_add_div_erf_mul_0 = async_compile.triton('triton_poi_fused_add_div_erf_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_erf_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
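# Note (added, illustrative): the constant 0.7071067811865475 above is 1/sqrt(2),
# so the kernel evaluates gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))) elementwise,
# matching the eager `gelu` helper reproduced later in this record.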
# kernel path: runs/run_shard_4/inductor_cache/mz/cmz3wjq2uutgv7zzhrquuijmcstklp4wvd4q2ptdi3fpwbjqcpo6.py
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_1 => add_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [net], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, truediv, erf, add, net_1], Original ATen: [aten.mul, aten.div, aten.erf, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, primals_5, primals_3, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
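# Hedged sketch (added for illustration): the tanh approximation quoted in the
# docstring above, for comparison with the exact erf form; the two typically
# agree to within about 1e-3 over the usual activation range.
def gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x +
        0.044715 * torch.pow(x, 3))))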
class AdapterLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.adapter_linear1 = nn.Linear(config.hidden_size, config.
adapter_size)
self.gelu = gelu
self.adapter_linear2 = nn.Linear(config.adapter_size, config.
hidden_size)
def forward(self, input_tensor):
net = self.adapter_linear1(input_tensor)
net = self.gelu(net)
net = self.adapter_linear2(net)
return net + input_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, adapter_size=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865475
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_erf_mul_0[grid(256)](buf0, buf1, 256,
XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, primals_5, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class AdapterLayerNew(nn.Module):
def __init__(self, config):
super().__init__()
self.adapter_linear1 = nn.Linear(config.hidden_size, config.
adapter_size)
self.gelu = gelu
self.adapter_linear2 = nn.Linear(config.adapter_size, config.
hidden_size)
def forward(self, input_0):
primals_1 = self.adapter_linear1.weight
primals_2 = self.adapter_linear1.bias
primals_4 = self.adapter_linear2.weight
primals_5 = self.adapter_linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| DAQuestionAnswering/Bert-n-Pals | AdapterLayer | false | 7,620 | [
"MIT"
] | 1 | d5a288b9ac62259e70c249635108ba3906e19f00 | https://github.com/DAQuestionAnswering/Bert-n-Pals/tree/d5a288b9ac62259e70c249635108ba3906e19f00 | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.adapter_linear1 = nn.Linear(config.hidden_size, config.
adapter_size)
self.gelu = gelu
self.adapter_linear2 = nn.Linear(config.adapter_size, config.
hidden_size)
def forward(self, input_tensor):
net = self.adapter_linear1(input_tensor)
net = self.gelu(net)
net = self.adapter_linear2(net)
return net + input_tensor
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [_mock_config(hidden_size=4, adapter_size=4)]
|
BertSelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x2/cx2hdvwyo7m5jvhhvtugzxqvmy6z4nsfhkkjhvgzbbm3cb6dsum2.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %mul_scalar : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%permute_default, 1.0), kwargs = {})
# %clone_default : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/iz/ciztqj6kop3hxov46yrmzprkzfir3eljcic4mkqznz2j5cfeaudr.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %sum_dim_int_list : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_default, [-1], True), kwargs = {})
# %eq_scalar : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%add_tensor, -inf), kwargs = {})
# %logical_not_default : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%eq_scalar,), kwargs = {})
# %any_dim : [num_users=1] = call_function[target=torch.ops.aten.any.dim](args = (%logical_not_default, -1, True), kwargs = {})
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float("-inf")
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = (tmp29 != 0)
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = (tmp33 != 0)
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = (tmp38 != 0)
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = (tmp43 != 0)
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + (x2), tmp14, xmask)
tl.store(out_ptr1 + (x2), tmp25, xmask)
tl.store(out_ptr2 + (x2), tmp45, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/x5/cx5uvbfethxuwwkwxf3xaualzhlcwqsz4jxqpbhintggaypzjwqf.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_default_2, %primals_8), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_tensor, %amax_default), kwargs = {})
# %exp_default : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_tensor,), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp_default, %sum_dim_int_list), kwargs = {})
# %logical_not_default_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_not.default](args = (%any_dim,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_self : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%logical_not_default_1, %full_default, %div_tensor), kwargs = {})
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 4)
x4 = xindex
x5 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr1 + (x5), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
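# Eager-mode sketch (added; illustrative only) of the two fused kernels above:
# a numerically stable softmax over `scores + attention_mask`, where rows that
# are entirely -inf produce zeros instead of NaN.
def _masked_softmax_reference(scores, attention_mask):
    import torch
    s = scores + attention_mask
    probs = torch.softmax(s, dim=-1)
    fully_masked = (s == float('-inf')).all(dim=-1, keepdim=True)
    return torch.where(fully_masked, torch.zeros_like(probs), probs)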
# kernel path: runs/run_shard_4/inductor_cache/vv/cvvnhithjvmvhfjufxwwzclfobkrgbyyteg66hp24r675f7elw4c.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %clone_default_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_default_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/6t/c6t5a5ere3lqjiu7zh3uu4oxmpdoujdaqqmeunxqapgzo4m74uav.py
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# context_layer_1 => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(buf0, primals_2, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_0.run(buf1, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(buf5, primals_8, buf6, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(buf9, buf8, primals_8, buf6, buf7, 256, grid=grid(256), stream=stream0)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(buf2, primals_7, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [context_layer_1], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
del buf11
return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
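# Note: besides the attention output itself, `call` also returns the
# flattened input, the attention probabilities (buf9) and reinterpreted
# views of the Q/K/V activations. In AOT-compiled forwards these extra
# outputs are typically the tensors saved for the backward pass (an
# assumption; this file does not assert it).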
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5)}]
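# A minimal smoke test for the reference module above (a sketch, not part of
# the original paritybench harness; the function name is illustrative).
def _smoke_test_attention():
    config = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5)
    model = BertSelfAttention(config).eval()  # eval() disables dropout
    hidden_states, attention_mask = get_inputs()
    with torch.no_grad():
        out = model(hidden_states, attention_mask)
    assert out.shape == hidden_states.shape  # (batch, seq_len, hidden)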
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp6, tmp9)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.maximum(tmp10, tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp5 - tmp14
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp16 + tmp18
tmp20 = tmp9 - tmp14
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp13 - tmp14
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = float('-inf')
tmp27 = tmp2 == tmp26
tmp28 = tmp27 == 0
tmp29 = tmp28.to(tl.int64)
tmp30 = tmp29 != 0
tmp31 = tmp5 == tmp26
tmp32 = tmp31 == 0
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 != 0
tmp35 = tmp30 | tmp34
tmp36 = tmp9 == tmp26
tmp37 = tmp36 == 0
tmp38 = tmp37.to(tl.int64)
tmp39 = tmp38 != 0
tmp40 = tmp35 | tmp39
tmp41 = tmp13 == tmp26
tmp42 = tmp41 == 0
tmp43 = tmp42.to(tl.int64)
tmp44 = tmp43 != 0
tmp45 = tmp40 | tmp44
tl.store(out_ptr0 + x2, tmp14, xmask)
tl.store(out_ptr1 + x2, tmp25, xmask)
tl.store(out_ptr2 + x2, tmp45, xmask)
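# triton_poi_fused_1 computes per-row softmax statistics for (scores + mask):
# out_ptr0 holds the row max, out_ptr1 the sum of exp(x - max), and out_ptr2
# a bool flag marking rows with at least one finite (not -inf) element. The
# unrolled tmp0..tmp13 loads are the four elements of each length-4 row, and
# x0 = xindex % 16 broadcasts the (4, 4, 4) attention mask over the batch
# dimension.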
@triton.jit
def triton_poi_fused_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 4
x4 = xindex
x5 = xindex % 64
    tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr1 + x5, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x3, xmask, eviction_policy='evict_last')
tmp1 = tmp0 == 0
tmp4 = tmp2 + tmp3
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tl.where(tmp1, tmp10, tmp9)
tl.store(in_out_ptr0 + x4, tmp11, xmask)
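# Rough eager-mode equivalent of the triton_poi_fused_1 / triton_poi_fused_2
# pair (a sketch for readability, not generated code): a numerically stable
# softmax over the last dim of (scores + mask), with fully masked rows
# (all -inf) forced to zero instead of producing NaN.
def _masked_softmax_reference(scores, mask):
    x = scores + mask
    m = x.amax(dim=-1, keepdim=True)                         # fused_1 out_ptr0
    s = (x - m).exp().sum(dim=-1, keepdim=True)              # fused_1 out_ptr1
    finite = (x != float('-inf')).any(dim=-1, keepdim=True)  # fused_1 out_ptr2
    probs = (x - m).exp() / s                                # fused_2
    return torch.where(finite, probs, torch.zeros_like(probs))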
@triton.jit
def triton_poi_fused_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, ynumel, xnumel,
        YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
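# triton_poi_fused_clone_4 is the materialised permute from the eager model:
# with head_size == 1 here, out[b, s, h] = in[b, h, s], i.e. the copy behind
# context_layer.permute(0, 2, 1, 3).contiguous() before the final .view().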
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 4)](buf0, primals_2, buf3, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_2
buf4 = reinterpret_tensor(buf0, (4, 4, 1, 4), (16, 4, 4, 1), 0)
del buf0
triton_poi_fused_0[grid(16, 4)](buf1, primals_5, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0),
            reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.bool)
triton_poi_fused_1[grid(64)](buf5, primals_8, buf6, buf7, buf8, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_2[grid(256)](buf9, buf8, primals_8, buf6, buf7,
256, XBLOCK=256, num_warps=4, num_stages=1)
del buf8
del primals_8
buf10 = reinterpret_tensor(buf7, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf7
triton_poi_fused_3[grid(16, 4)](buf2, primals_7, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
        extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0),
            reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf6
triton_poi_fused_clone_4[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
del buf11
    return (reinterpret_tensor(buf12, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf9,
        reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class BertSelfAttentionNew(nn.Module):
def __init__(self, config):
super(BertSelfAttentionNew, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, input_0, input_1):
primals_1 = self.query.weight
primals_2 = self.query.bias
primals_4 = self.key.weight
primals_5 = self.key.bias
primals_6 = self.value.weight
primals_7 = self.value.bias
primals_3 = input_0
primals_8 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
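# A minimal parity check between the eager BertSelfAttention above and the
# compiled wrapper (a sketch, not part of the generated file; assumes both
# classes are importable in one scope and runs in eval mode, since the traced
# graph contains no dropout op).
def _check_parity():
    from _paritybench_helpers import _mock_config
    config = _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5)
    torch.manual_seed(0)
    eager = BertSelfAttention(config).cuda().eval()
    compiled = BertSelfAttentionNew(config).cuda().eval()
    compiled.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, device='cuda')
    mask = torch.zeros(4, 4, 4, device='cuda')
    torch.testing.assert_close(compiled(x, mask), eager(x, mask))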
| AlanFokCo/bert-chinese-horovod-elastic | BertSelfAttention | false | 7,621 | [
"Apache-2.0"
] | 1 | 02317d0857e0e8e313dd63ead61ca9996b25548e | https://github.com/AlanFokCo/bert-chinese-horovod-elastic/tree/02317d0857e0e8e313dd63ead61ca9996b25548e | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
'The hidden size (%d) is not a multiple of the number of attention heads (%d)'
% (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, num_attention_heads=4,
        attention_probs_dropout_prob=0.5)}]
|
RobertaClassificationHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lz/clzc7c4rqtr7ky6jrepxpu2dlmeo4y66gzcis5bqhwixpt7ktopj.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_3 => tanh
# Graph fragment:
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn
class RobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHead, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=0.5,
        num_labels=4)}]
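# A minimal smoke test for the head above (a sketch, not part of the original
# harness; note that on the 4-D paritybench input, features[:, 0, :] selects
# along dim 1 and leaves a (4, 4, 4) tensor).
def _smoke_test_head():
    config = _mock_config(hidden_size=4, hidden_dropout_prob=0.5, num_labels=4)
    head = RobertaClassificationHead(config).eval()  # eval() disables dropout
    features = get_inputs()[0]                       # (4, 4, 4, 4)
    with torch.no_grad():
        logits = head(features)
    assert logits.shape == (4, 4, 4)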
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
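# triton_poi_fused_clone_0 materialises features[:, 0] as a contiguous
# buffer: the x0 + 64 * x1 offset walks the 16 trailing elements of each
# batch item while skipping the stride-16 second dimension, i.e. an eager
# `features[:, 0].contiguous()` (sketch, not generated code).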
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
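# triton_poi_fused_tanh_1 folds the dense-layer bias into the tanh and
# updates the matmul output in place. Rough eager equivalent (a sketch; the
# matmul itself is the extern_kernels.mm call in `call` below):
#   x = torch.tanh(x @ dense.weight.t() + dense.bias)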
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4),
            (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
            alpha=1, beta=1, out=buf3)
del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0),
        reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4)
class RobertaClassificationHeadNew(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super(RobertaClassificationHeadNew, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
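# Note: the compiled graph above contains no dropout op even though the
# module registers nn.Dropout(p=0.5); this matches a trace taken with dropout
# inactive (an assumption -- e.g. eval mode), so training-time behaviour of
# the two classes can differ.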
| GavinGuan95/Generative-VQA | RobertaClassificationHead | false | 7,622 | [
"MIT"
] | 1 | 0912e3a2426809ef4d4eb40bae667b31c2269161 | https://github.com/GavinGuan95/Generative-VQA/tree/0912e3a2426809ef4d4eb40bae667b31c2269161 | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn
class Model(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, features, **kwargs):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = torch.tanh(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
    return [[], {'config': _mock_config(hidden_size=4, hidden_dropout_prob=0.5,
        num_labels=4)}]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/kn/cknyjwkwufnzzf4ya3scui55ownkmt5cdh3hggzwsfe3ch5fshzm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/yw/cywelngc4cje5ebivdldhamyoxvyf25txstz7duqznwukobbwawy.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 96
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (48*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/wr/cwrbaplpfk7m6giisotqeykajo7urpubzk4y7hl6wjrhxxtwwukj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (512*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dx/cdx5ml2qpofihmmpnvabqkpaoyptwmwdx4jtjzptieewtlhrqlmf.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kv/ckvorupxanzrceis7ogps6qnxhad4srcb6zrfzpkwhenxdnsalg7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (2048*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gn/cgnnleskizx3ohn3pvhpxvsg3tck6tl4c2drwdqhjj4u3dvcubg3.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out_1 => convolution
# out_2 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_5 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
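# The fused convolution/LeakyReLU kernels in this file share one epilogue:
# add the per-channel bias, compare against zero, and store both the boolean
# x > 0 mask (out_ptr0, presumably reused by the backward pass) and the
# activated value where(x > 0, x, 0.01 * x) (out_ptr1). Rough eager sketch
# (illustrative): y = torch.nn.functional.leaky_relu(conv(x), 0.01).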
# kernel path: runs/run_shard_4/inductor_cache/p7/cp7ewy5re5eo3gxb7smcipqvqx3btzxh5rgjq4pqcucakbzeumra.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out_3 => convolution_1
# out_4 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/or/cornapfmmnpmsrxhsq3fikjikxxwl4vxaa5shjmoppfidyrlwcd6.py
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out_5 => convolution_2
# out_6 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dd/cddnfecs3m6l3ftbsjdw6yae6npdfa4lf3fsl2ynhtakutnl5qqk.py
# Topologically Sorted Source Nodes: [out_7, out_8], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# out_7 => convolution_3
# out_8 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 256], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x1 + (256*y0)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x1 + (256*y0)), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y2 + (4*x1) + (1024*y3)), tmp7, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/k5/ck5zfn4npjsrc6pwdqktyaf2nwodoiherdpaf3tdir3p5gwoqzrg.py
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# out_11 => gt_4, mul_4, where_4
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_11), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.01), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %add_tensor_2, %mul_4), kwargs = {})
triton_poi_fused_leaky_relu_9 = async_compile.triton('triton_poi_fused_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/re/crevrauvvakjyaka5z3itt7lwh24ztlo2jnznvzvvvwwv6tgyynj.py
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# out_13 => gt_5, mul_5, where_5
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_13), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %add_tensor_1, %mul_5), kwargs = {})
triton_poi_fused_leaky_relu_10 = async_compile.triton('triton_poi_fused_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vm/cvmlnewb3thhvvojbksgxyqdgxq7lxzuz3xxcjtnjna5qvhp3zch.py
# Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.relu, aten._softmax]
# Source node to ATen node mapping:
# out_15 => relu
# out_16 => amax, exp, sub, sum_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
triton_poi_fused__softmax_relu_11 = async_compile.triton('triton_poi_fused__softmax_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_relu_11(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 15
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp5 = tl.load(in_ptr0 + (15 + x0), xmask)
tmp9 = tl.load(in_ptr0 + (30 + x0), xmask)
tmp13 = tl.load(in_ptr0 + (45 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp27, xmask)
''', device_str='cuda')
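# Editorial sketch (not generated by Inductor): kernel 11 computes the two
# reduction statistics of a numerically stable softmax over dim 0 (the batch
# dimension, matching Softmax(dim=0) in the source module). With only 4 rows,
# the reduction is fully unrolled into the four loads above. Hypothetical
# eager equivalent (never called):
def _softmax_stats_reference(logits, bias):
    z = torch.relu(logits + bias)                          # (4, 15)
    amax = z.amax(dim=0, keepdim=True)                     # -> buf24
    denom = torch.exp(z - amax).sum(dim=0, keepdim=True)   # -> buf25
    return amax, denom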
# kernel path: runs/run_shard_4/inductor_cache/xy/cxygw4yj5nk3ctul6tbroey6iy2hnhxuwpynuqvtknhew7gowxpj.py
# Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.relu, aten._softmax, aten.threshold_backward]
# Source node to ATen node mapping:
# out_15 => relu
# out_16 => amax, div, exp, sub, sum_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%relu, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused__softmax_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused__softmax_relu_threshold_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_relu_threshold_backward_12(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 60
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 15
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tmp4 <= tmp10
tl.store(out_ptr0 + (x2), tmp9, xmask)
tl.store(out_ptr1 + (x2), tmp11, xmask)
''', device_str='cuda')
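# Editorial sketch (not generated by Inductor): kernel 12 finishes the softmax
# by normalizing exp(z - amax) with the precomputed denominator, and also
# emits the (relu <= 0) mask that autograd's threshold_backward uses to zero
# gradients where the ReLU was inactive. Hypothetical eager equivalent:
def _softmax_finish_reference(logits, bias, amax, denom):
    z = torch.relu(logits + bias)
    return torch.exp(z - amax) / denom, z <= 0.0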
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (512, 1024), (1024, 1))
assert_size_stride(primals_11, (512, ), (1, ))
assert_size_stride(primals_12, (64, 512), (512, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (15, 64), (64, 1))
assert_size_stride(primals_15, (15, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 96, 16, grid=grid(96, 16), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 16, grid=grid(2048, 16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 16, grid=grid(8192, 16), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 16, grid=grid(32768, 16), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32))
buf6 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32), torch.bool)
buf7 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_5.run(buf5, primals_3, buf6, buf7, 123008, grid=grid(123008), stream=stream0)
del buf5
del primals_3
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 14, 14), (12544, 1, 896, 64))
buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.bool)
buf10 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32)
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf8, primals_5, buf9, buf10, 50176, grid=grid(50176), stream=stream0)
del buf8
del primals_5
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
buf12 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.bool)
buf13 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128), torch.float32)
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_7.run(buf11, primals_7, buf12, buf13, 18432, grid=grid(18432), stream=stream0)
del buf11
del primals_7
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 2, 2), (1024, 1, 512, 256))
buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool)
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_7, out_8], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf14, primals_9, buf15, buf16, 16, 256, grid=grid(16, 256), stream=stream0)
del buf14
del primals_9
buf17 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 512), (1, 1024), 0), out=buf17)
buf18 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf19 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_9.run(buf17, primals_11, buf18, buf19, 2048, grid=grid(2048), stream=stream0)
del buf17
del primals_11
buf20 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf19, reinterpret_tensor(primals_12, (512, 64), (1, 512), 0), out=buf20)
buf21 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
buf22 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_10.run(buf20, primals_13, buf21, buf22, 256, grid=grid(256), stream=stream0)
del buf20
del primals_13
buf23 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf22, reinterpret_tensor(primals_14, (64, 15), (1, 64), 0), out=buf23)
buf24 = empty_strided_cuda((1, 15), (15, 1), torch.float32)
buf25 = empty_strided_cuda((1, 15), (15, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.relu, aten._softmax]
triton_poi_fused__softmax_relu_11.run(buf23, primals_15, buf24, buf25, 15, grid=grid(15), stream=stream0)
buf26 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
buf27 = empty_strided_cuda((4, 15), (15, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.relu, aten._softmax, aten.threshold_backward]
triton_poi_fused__softmax_relu_threshold_backward_12.run(buf23, primals_15, buf24, buf25, buf26, buf27, 60, grid=grid(60), stream=stream0)
del buf23
del buf24
del buf25
del primals_15
return (buf26, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf9, buf10, buf12, buf13, buf15, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), buf18, buf19, buf21, buf22, buf26, buf27, primals_14, primals_12, primals_10, )
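# Editorial note: buf26 (the softmax output) is the module's forward result;
# the remaining entries in the tuple above are activations, boolean masks and
# parameters that the AOT-compiled backward graph expects to have been saved
# during the forward pass.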
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((15, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((15, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
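# Editorial note: running this generated file directly benchmarks call() with
# rand_strided inputs that match the guard shapes asserted above, reporting
# timings through torch._inductor.utils.print_performance.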
| import torch
class Actor(torch.nn.Module):
def __init__(self, actor_lr, epsilon):
super(Actor, self).__init__()
self.epsilon = epsilon
self.define_network()
self.optimizer = torch.optim.Adam(params=self.parameters(), lr=actor_lr
)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu:0')
self
self.prev_params = self.parameters()
def define_network(self):
self.relu = torch.nn.ReLU()
self.leaky_relu = torch.nn.LeakyReLU()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=0)
self.l1 = torch.nn.Linear(1024, 512)
self.l2 = torch.nn.Linear(512, 64)
self.l3 = torch.nn.Linear(64, 15)
self.conv1 = torch.nn.Conv2d(3, 32, kernel_size=4, stride=2)
self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = torch.nn.Conv2d(64, 128, kernel_size=4, stride=2)
self.conv4 = torch.nn.Conv2d(128, 256, kernel_size=4, stride=2)
def loss(self, log_probs, k_log_probs, advantages):
r_theta = torch.exp(log_probs - k_log_probs)
clipped_r = torch.clamp(r_theta, 1.0 - self.epsilon, 1.0 + self.epsilon
)
return torch.mean(torch.min(r_theta * advantages, clipped_r *
advantages))
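    # Editorial note: this is the PPO clipped surrogate objective,
    #   L(theta) = E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)],
    # with r = exp(log_probs - k_log_probs). The standard PPO loss is the
    # negative of this expectation; the sign convention here is kept exactly
    # as in the source repository.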
def forward(self, x):
out = torch.Tensor(x).float()
out = self.conv1(out)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = self.conv3(out)
out = self.leaky_relu(out)
out = self.conv4(out)
out = self.leaky_relu(out)
out = out.reshape(-1, 2 * 2 * 256)
out = self.l1(out)
out = self.leaky_relu(out)
out = self.l2(out)
out = self.leaky_relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.softmax(out)
return out
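    # Editorial note: shape trace for the reshape above, with 64x64 inputs and
    # kernel_size=4, stride=2, so H_out = floor((H - 4) / 2) + 1:
    #   conv1: 64 -> 31, conv2: 31 -> 14, conv3: 14 -> 6, conv4: 6 -> 2,
    # giving a flattened feature size of 2 * 2 * 256 = 1024, which matches l1.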
def optimize(self, log_probs, k_log_probs, advantages, batch_sz=32):
loss = self.loss(log_probs, k_log_probs, advantages)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'actor_lr': 4, 'epsilon': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
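# Editorial sketch (not generated by Inductor): triton_poi_fused_0 through
# triton_poi_fused_4 only permute memory layout; kernel 0 repacks the NCHW
# input into channels-last strides (12288, 1, 192, 3) so the convolutions run
# in NHWC. Hypothetical eager equivalent (never called):
def _to_channels_last_reference(x):
    return x.contiguous(memory_format=torch.channels_last)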
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 96
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 48 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_5(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_7(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 256
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
y2 = yindex % 4
y3 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x1 + 256 * y0), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x1 + 256 * y0), tmp4, xmask & ymask)
tl.store(out_ptr1 + (y2 + 4 * x1 + 1024 * y3), tmp7, xmask & ymask)
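# Editorial note: unlike kernels 5-7, this kernel writes its two outputs with
# different layouts. The boolean mask keeps the channels-last strides of the
# convolution output, while the activation is stored with strides
# (1024, 4, 2, 1), i.e. row-major contiguous for shape (4, 256, 2, 2), which
# is what lets call() reinterpret the buffer as a (4, 1024) matrix for the
# first linear layer without an extra copy.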
@triton.jit
def triton_poi_fused_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, None)
tl.store(out_ptr1 + x2, tmp7, None)
@triton.jit
def triton_poi_fused_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_11(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 15
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp5 = tl.load(in_ptr0 + (15 + x0), xmask)
tmp9 = tl.load(in_ptr0 + (30 + x0), xmask)
tmp13 = tl.load(in_ptr0 + (45 + x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp5 + tmp1
tmp7 = triton_helpers.maximum(tmp3, tmp6)
tmp8 = triton_helpers.maximum(tmp4, tmp7)
tmp10 = tmp9 + tmp1
tmp11 = triton_helpers.maximum(tmp3, tmp10)
tmp12 = triton_helpers.maximum(tmp8, tmp11)
tmp14 = tmp13 + tmp1
tmp15 = triton_helpers.maximum(tmp3, tmp14)
tmp16 = triton_helpers.maximum(tmp12, tmp15)
tmp17 = tmp4 - tmp16
tmp18 = tl_math.exp(tmp17)
tmp19 = tmp7 - tmp16
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp18 + tmp20
tmp22 = tmp11 - tmp16
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp21 + tmp23
tmp25 = tmp15 - tmp16
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp24 + tmp26
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp27, xmask)
@triton.jit
def triton_poi_fused__softmax_relu_threshold_backward_12(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 60
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 15
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp9 = tmp7 / tmp8
tmp10 = 0.0
tmp11 = tmp4 <= tmp10
tl.store(out_ptr0 + x2, tmp9, xmask)
tl.store(out_ptr1 + x2, tmp11, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (512, 1024), (1024, 1))
assert_size_stride(primals_11, (512,), (1,))
assert_size_stride(primals_12, (64, 512), (512, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (15, 64), (64, 1))
assert_size_stride(primals_15, (15,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(12, 4096)](primals_1, buf0, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((32, 3, 4, 4), (48, 1, 12, 3), torch.float32)
triton_poi_fused_1[grid(96, 16)](primals_2, buf1, 96, 16, XBLOCK=16,
YBLOCK=64, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf0, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32))
buf6 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32),
torch.bool)
buf7 = empty_strided_cuda((4, 32, 31, 31), (30752, 1, 992, 32),
torch.float32)
triton_poi_fused_convolution_leaky_relu_5[grid(123008)](buf5,
primals_3, buf6, buf7, 123008, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf5
del primals_3
buf8 = extern_kernels.convolution(buf7, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 14, 14), (12544, 1, 896, 64))
buf9 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.bool)
buf10 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(50176)](buf8,
primals_5, buf9, buf10, 50176, XBLOCK=512, num_warps=4,
num_stages=1)
del buf8
del primals_5
buf11 = extern_kernels.convolution(buf10, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 128, 6, 6), (4608, 1, 768, 128))
buf12 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.bool)
buf13 = empty_strided_cuda((4, 128, 6, 6), (4608, 1, 768, 128),
torch.float32)
triton_poi_fused_convolution_leaky_relu_7[grid(18432)](buf11,
primals_7, buf12, buf13, 18432, XBLOCK=256, num_warps=4,
num_stages=1)
del buf11
del primals_7
buf14 = extern_kernels.convolution(buf13, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 256, 2, 2), (1024, 1, 512, 256))
buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.bool)
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_8[grid(16, 256)](buf14,
primals_9, buf15, buf16, 16, 256, XBLOCK=256, YBLOCK=1,
num_warps=4, num_stages=1)
del buf14
del primals_9
buf17 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0
), reinterpret_tensor(primals_10, (1024, 512), (1, 1024), 0),
out=buf17)
buf18 = empty_strided_cuda((4, 512), (512, 1), torch.bool)
buf19 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
triton_poi_fused_leaky_relu_9[grid(2048)](buf17, primals_11, buf18,
buf19, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del buf17
del primals_11
buf20 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf19, reinterpret_tensor(primals_12, (512, 64),
(1, 512), 0), out=buf20)
buf21 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
buf22 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_leaky_relu_10[grid(256)](buf20, primals_13, buf21,
buf22, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf20
del primals_13
buf23 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
extern_kernels.mm(buf22, reinterpret_tensor(primals_14, (64, 15), (
1, 64), 0), out=buf23)
buf24 = empty_strided_cuda((1, 15), (15, 1), torch.float32)
buf25 = empty_strided_cuda((1, 15), (15, 1), torch.float32)
triton_poi_fused__softmax_relu_11[grid(15)](buf23, primals_15,
buf24, buf25, 15, XBLOCK=16, num_warps=1, num_stages=1)
buf26 = empty_strided_cuda((4, 15), (15, 1), torch.float32)
buf27 = empty_strided_cuda((4, 15), (15, 1), torch.bool)
triton_poi_fused__softmax_relu_threshold_backward_12[grid(60)](buf23,
primals_15, buf24, buf25, buf26, buf27, 60, XBLOCK=64,
num_warps=1, num_stages=1)
del buf23
del buf24
del buf25
del primals_15
return (buf26, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf9, buf10,
buf12, buf13, buf15, reinterpret_tensor(buf16, (4, 1024), (1024, 1),
0), buf18, buf19, buf21, buf22, buf26, buf27, primals_14,
primals_12, primals_10)
class ActorNew(torch.nn.Module):
def __init__(self, actor_lr, epsilon):
super(ActorNew, self).__init__()
self.epsilon = epsilon
self.define_network()
self.optimizer = torch.optim.Adam(params=self.parameters(), lr=actor_lr
)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu:0')
self
self.prev_params = self.parameters()
def define_network(self):
self.relu = torch.nn.ReLU()
self.leaky_relu = torch.nn.LeakyReLU()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=0)
self.l1 = torch.nn.Linear(1024, 512)
self.l2 = torch.nn.Linear(512, 64)
self.l3 = torch.nn.Linear(64, 15)
self.conv1 = torch.nn.Conv2d(3, 32, kernel_size=4, stride=2)
self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = torch.nn.Conv2d(64, 128, kernel_size=4, stride=2)
self.conv4 = torch.nn.Conv2d(128, 256, kernel_size=4, stride=2)
def loss(self, log_probs, k_log_probs, advantages):
r_theta = torch.exp(log_probs - k_log_probs)
clipped_r = torch.clamp(r_theta, 1.0 - self.epsilon, 1.0 + self.epsilon
)
return torch.mean(torch.min(r_theta * advantages, clipped_r *
advantages))
def optimize(self, log_probs, k_log_probs, advantages, batch_sz=32):
loss = self.loss(log_probs, k_log_probs, advantages)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def forward(self, input_0):
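        # Editorial note: the primals numbering follows the order in which
        # the compiled graph captured the parameters (conv1..conv4 weights
        # and biases as primals_2..primals_9, then l1..l3 as
        # primals_10..primals_15), which call() checks via assert_size_stride.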
primals_10 = self.l1.weight
primals_11 = self.l1.bias
primals_12 = self.l2.weight
        primals_13 = self.l2.bias
primals_14 = self.l3.weight
primals_15 = self.l3.bias
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
| Gregory-Eales/Proximal-Policy-Optimization | Actor | false | 7,623 | [
"Apache-2.0"
] | 1 | 134f930bd1436c34e79af9344fe70f75e11c8a30 | https://github.com/Gregory-Eales/Proximal-Policy-Optimization/tree/134f930bd1436c34e79af9344fe70f75e11c8a30 | import torch
class Model(torch.nn.Module):
def __init__(self, actor_lr, epsilon):
super().__init__()
self.epsilon = epsilon
self.define_network()
self.optimizer = torch.optim.Adam(params=self.parameters(), lr=actor_lr
)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else
'cpu:0')
self
self.prev_params = self.parameters()
def define_network(self):
self.relu = torch.nn.ReLU()
self.leaky_relu = torch.nn.LeakyReLU()
self.sigmoid = torch.nn.Sigmoid()
self.tanh = torch.nn.Tanh()
self.softmax = torch.nn.Softmax(dim=0)
self.l1 = torch.nn.Linear(1024, 512)
self.l2 = torch.nn.Linear(512, 64)
self.l3 = torch.nn.Linear(64, 15)
self.conv1 = torch.nn.Conv2d(3, 32, kernel_size=4, stride=2)
self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = torch.nn.Conv2d(64, 128, kernel_size=4, stride=2)
self.conv4 = torch.nn.Conv2d(128, 256, kernel_size=4, stride=2)
def loss(self, log_probs, k_log_probs, advantages):
r_theta = torch.exp(log_probs - k_log_probs)
clipped_r = torch.clamp(r_theta, 1.0 - self.epsilon, 1.0 + self.epsilon
)
return torch.mean(torch.min(r_theta * advantages, clipped_r *
advantages))
def forward(self, x):
out = torch.Tensor(x).float()
out = self.conv1(out)
out = self.leaky_relu(out)
out = self.conv2(out)
out = self.leaky_relu(out)
out = self.conv3(out)
out = self.leaky_relu(out)
out = self.conv4(out)
out = self.leaky_relu(out)
out = out.reshape(-1, 2 * 2 * 256)
out = self.l1(out)
out = self.leaky_relu(out)
out = self.l2(out)
out = self.leaky_relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.softmax(out)
return out
def optimize(self, log_probs, k_log_probs, advantages, batch_sz=32):
loss = self.loss(log_probs, k_log_probs, advantages)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4, 4]
|
Normalization | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
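# Editorial sketch (not generated by Inductor): together these two kernels
# implement LayerNorm over the last dimension (size 4): kernel 0 produces the
# per-row mean and rsqrt(var + 1e-05), kernel 1 applies them together with the
# affine parameters. Hypothetical eager equivalent (never called):
def _layer_norm_reference(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    rstd = torch.rsqrt(x.var(dim=-1, unbiased=False, keepdim=True) + eps)
    return (x - mean) * rstd * weight + bias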
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
buf1 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_2
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Normalization(nn.Module):
def __init__(self, cfg):
super(Normalization, self).__init__()
self.normalizer = nn.LayerNorm(cfg.embedding_dim,
elementwise_affine=True)
def forward(self, input):
return self.normalizer(input.view(-1, input.size(-1))).view(*input.
size())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(embedding_dim=4)}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
buf1 = empty_strided_cuda((64, 1), (1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del buf1
del primals_2
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1
class NormalizationNew(nn.Module):
def __init__(self, cfg):
super(NormalizationNew, self).__init__()
self.normalizer = nn.LayerNorm(cfg.embedding_dim,
elementwise_affine=True)
def forward(self, input_0):
primals_2 = self.normalizer.weight
primals_3 = self.normalizer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| JustinLiam/DAN | Normalization | false | 7,624 | [
"MIT"
] | 1 | eb29cddad6c93e591854b115ef524643b1cd471c | https://github.com/JustinLiam/DAN/tree/eb29cddad6c93e591854b115ef524643b1cd471c | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, cfg):
super().__init__()
self.normalizer = nn.LayerNorm(cfg.embedding_dim,
elementwise_affine=True)
def forward(self, input):
return self.normalizer(input.view(-1, input.size(-1))).view(*input.
size())
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SingleHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/va/cvak4tz7y5xkgruz5imh2laix3m7at5tnwvirq3yy2nur2qdcdia.py
# Topologically Sorted Source Nodes: [U, tanh], Original ATen: [aten.mul, aten.tanh]
# Source node to ATen node mapping:
# U => mul
# tanh => tanh
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 0.5), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%mul,), kwargs = {})
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 4), kwargs = {})
triton_poi_fused_mul_tanh_0 = async_compile.triton('triton_poi_fused_mul_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp7 = tmp6 * tmp1
tmp8 = libdevice.tanh(tmp7)
tmp9 = tmp8 * tmp4
tmp11 = tmp10 * tmp1
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp12 * tmp4
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp17 = libdevice.tanh(tmp16)
tmp18 = tmp17 * tmp4
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp21 = tmp20 * tmp1
tmp22 = libdevice.tanh(tmp21)
tmp23 = tmp22 * tmp4
tmp24 = triton_helpers.maximum(tmp19, tmp23)
tmp25 = tmp5 - tmp24
tmp26 = 4.0
tmp27 = tmp25 * tmp26
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
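# Note on the kernel above: it fuses U = tanh_clipping * tanh(norm_factor * scores)
# (norm_factor = 0.5 and tanh_clipping = 4.0 for this 4-dim config) with the
# row-max subtraction that makes the subsequent log-softmax numerically stable;
# scaling by 4.0 after the subtraction is algebraically equivalent to
# subtracting the row max of the already-scaled values.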
# kernel path: runs/run_shard_4/inductor_cache/am/cam4sww3ppo4w3jhpfve3iek2mi4cmz7fh25jvjggq52vtu6lahc.py
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# attention => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_1, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
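# This kernel completes the stable log-softmax over the last dimension:
# log_softmax(u) = u - log(sum(exp(u))), applied to the max-shifted values
# produced by the kernel above.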
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [U, tanh], Original ATen: [aten.mul, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_tanh_0.run(buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf3
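    # buf4 (the log-softmax attention) is the module output and is returned
    # twice: once in the output slot and once as a saved tensor. buf2 (the raw
    # scores) and the trailing reinterpret_tensor views of buf0/buf1/primals_1
    # are saved so autograd can reuse them in the backward pass.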
return (buf4, buf2, buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class SingleHeadAttention(nn.Module):
def __init__(self, cfg):
super(SingleHeadAttention, self).__init__()
self.input_dim = cfg.embedding_dim
self.embedding_dim = cfg.embedding_dim
self.value_dim = self.embedding_dim
self.key_dim = self.value_dim
self.tanh_clipping = cfg.tanh_clipping
self.norm_factor = 1 / math.sqrt(self.key_dim)
self.w_query = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
self.w_key = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1.0 / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, q, h=None, mask=None):
"""
:param q: queries (batch_size, n_query, input_dim)
:param h: data (batch_size, graph_size, input_dim)
:param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)
Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)
:return:
"""
if h is None:
h = q
batch_size, target_size, input_dim = h.size()
n_query = q.size(1)
assert q.size(0) == batch_size
assert q.size(2) == input_dim
assert input_dim == self.input_dim
h_flat = h.reshape(-1, input_dim)
q_flat = q.reshape(-1, input_dim)
shape_k = batch_size, target_size, -1
shape_q = batch_size, n_query, -1
Q = torch.matmul(q_flat, self.w_query).view(shape_q)
K = torch.matmul(h_flat, self.w_key).view(shape_k)
U = self.norm_factor * torch.matmul(Q, K.transpose(1, 2))
U = self.tanh_clipping * torch.tanh(U)
if mask is not None:
mask = mask.view(batch_size, 1, target_size).expand_as(U)
U[mask.bool()] = -100000000.0
attention = torch.log_softmax(U, dim=-1)
out = attention
return out
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'cfg': _mock_config(embedding_dim=4, tanh_clipping=4)}]
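# Usage sketch (illustrative; these names are not part of the record):
#   cfg = _mock_config(embedding_dim=4, tanh_clipping=4)
#   attn = SingleHeadAttention(cfg)
#   log_p = attn(torch.rand(4, 4, 4))  # (batch, n_query, graph_size) log-probs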
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = libdevice.tanh(tmp2)
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp7 = tmp6 * tmp1
tmp8 = libdevice.tanh(tmp7)
tmp9 = tmp8 * tmp4
tmp11 = tmp10 * tmp1
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp12 * tmp4
tmp14 = triton_helpers.maximum(tmp9, tmp13)
tmp16 = tmp15 * tmp1
tmp17 = libdevice.tanh(tmp16)
tmp18 = tmp17 * tmp4
tmp19 = triton_helpers.maximum(tmp14, tmp18)
tmp21 = tmp20 * tmp1
tmp22 = libdevice.tanh(tmp21)
tmp23 = tmp22 * tmp4
tmp24 = triton_helpers.maximum(tmp19, tmp23)
tmp25 = tmp5 - tmp24
tmp26 = 4.0
tmp27 = tmp25 * tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_tanh_0[grid(64)](buf2, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf3
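    # reinterpret_tensor only builds strided views: the (16, 1, 4) strides
    # express a transpose of a (16, 4, 1) buffer, so no data is copied before
    # returning.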
return buf4, buf2, buf4, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0)
class SingleHeadAttentionNew(nn.Module):
def __init__(self, cfg):
super(SingleHeadAttentionNew, self).__init__()
self.input_dim = cfg.embedding_dim
self.embedding_dim = cfg.embedding_dim
self.value_dim = self.embedding_dim
self.key_dim = self.value_dim
self.tanh_clipping = cfg.tanh_clipping
self.norm_factor = 1 / math.sqrt(self.key_dim)
self.w_query = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
self.w_key = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1.0 / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_2 = self.w_query
primals_3 = self.w_key
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| JustinLiam/DAN | SingleHeadAttention | false | 7,625 | [
"MIT"
] | 1 | eb29cddad6c93e591854b115ef524643b1cd471c | https://github.com/JustinLiam/DAN/tree/eb29cddad6c93e591854b115ef524643b1cd471c | from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, cfg):
super().__init__()
self.input_dim = cfg.embedding_dim
self.embedding_dim = cfg.embedding_dim
self.value_dim = self.embedding_dim
self.key_dim = self.value_dim
self.tanh_clipping = cfg.tanh_clipping
self.norm_factor = 1 / math.sqrt(self.key_dim)
self.w_query = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
self.w_key = nn.Parameter(torch.Tensor(self.input_dim, self.key_dim))
self.init_parameters()
def init_parameters(self):
for param in self.parameters():
stdv = 1.0 / math.sqrt(param.size(-1))
param.data.uniform_(-stdv, stdv)
def forward(self, q, h=None, mask=None):
"""
:param q: queries (batch_size, n_query, input_dim)
:param h: data (batch_size, graph_size, input_dim)
:param mask: mask (batch_size, n_query, graph_size) or viewable as that (i.e. can be 2 dim if n_query == 1)
Mask should contain 1 if attention is not possible (i.e. mask is negative adjacency)
:return:
"""
if h is None:
h = q
batch_size, target_size, input_dim = h.size()
n_query = q.size(1)
assert q.size(0) == batch_size
assert q.size(2) == input_dim
assert input_dim == self.input_dim
h_flat = h.reshape(-1, input_dim)
q_flat = q.reshape(-1, input_dim)
shape_k = batch_size, target_size, -1
shape_q = batch_size, n_query, -1
Q = torch.matmul(q_flat, self.w_query).view(shape_q)
K = torch.matmul(h_flat, self.w_key).view(shape_k)
U = self.norm_factor * torch.matmul(Q, K.transpose(1, 2))
U = self.tanh_clipping * torch.tanh(U)
if mask is not None:
mask = mask.view(batch_size, 1, target_size).expand_as(U)
U[mask.bool()] = -100000000.0
attention = torch.log_softmax(U, dim=-1)
out = attention
return out
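    # Mask sketch (illustrative): any boolean mask viewable as
    # (batch_size, 1, graph_size) works, e.g.
    #   mask = torch.zeros(4, 4, dtype=torch.bool); mask[:, -1] = True
    #   model(torch.rand(4, 4, 4), mask=mask)  # last column forced to -1e8 pre-softmax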
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
    return [[], {'cfg': _mock_config(embedding_dim=4, tanh_clipping=4)}]
|
SparsemaxBisect | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/no/cnoevxmi56p67zph5bcvud6uollxe7h3vjriggenmd3qdtjq74rl.py
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, tau_m, sub_6, p_m, sum_2, f_m, sub_3, clamp, sum_1, f_lo, mul_1, tau_lo_1, dm_2, tau_m_1, sub_8, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_10, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_12, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_14, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_16, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_18, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_20, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_22, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_24, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_26, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_28, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_30, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_32, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_34, p_m_14, sum_16, f_m_14, mul_15, tau_lo_15, dm_16, tau_m_15, sub_36, p_m_15, sum_17, f_m_15, mul_16, tau_lo_16, dm_17, tau_m_16, sub_38, p_m_16, sum_18, f_m_16, mul_17, tau_lo_17], Original ATen: [aten.max, aten.sub, aten.div, aten.add, aten.clamp, aten.sum, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm => sub_5
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_2 => div_1
# dm_3 => div_2
# dm_4 => div_3
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_4
# f_m => sub_7
# f_m_1 => sub_9
# f_m_10 => sub_27
# f_m_11 => sub_29
# f_m_12 => sub_31
# f_m_13 => sub_33
# f_m_14 => sub_35
# f_m_15 => sub_37
# f_m_16 => sub_39
# f_m_2 => sub_11
# f_m_3 => sub_13
# f_m_4 => sub_15
# f_m_5 => sub_17
# f_m_6 => sub_19
# f_m_7 => sub_21
# f_m_8 => sub_23
# f_m_9 => sub_25
# max_1 => max_1
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_17 => mul_17
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# p_m => clamp_min_1
# p_m_1 => clamp_min_2
# p_m_10 => clamp_min_11
# p_m_11 => clamp_min_12
# p_m_12 => clamp_min_13
# p_m_13 => clamp_min_14
# p_m_14 => clamp_min_15
# p_m_15 => clamp_min_16
# p_m_16 => clamp_min_17
# p_m_2 => clamp_min_3
# p_m_3 => clamp_min_4
# p_m_4 => clamp_min_5
# p_m_5 => clamp_min_6
# p_m_6 => clamp_min_7
# p_m_7 => clamp_min_8
# p_m_8 => clamp_min_9
# p_m_9 => clamp_min_10
# sub_10 => sub_10
# sub_12 => sub_12
# sub_14 => sub_14
# sub_16 => sub_16
# sub_18 => sub_18
# sub_20 => sub_20
# sub_22 => sub_22
# sub_24 => sub_24
# sub_26 => sub_26
# sub_28 => sub_28
# sub_3 => sub_3
# sub_30 => sub_30
# sub_32 => sub_32
# sub_34 => sub_34
# sub_36 => sub_36
# sub_38 => sub_38
# sub_6 => sub_6
# sub_8 => sub_8
# sum_1 => sum_1
# sum_10 => sum_10
# sum_11 => sum_11
# sum_12 => sum_12
# sum_13 => sum_13
# sum_14 => sum_14
# sum_15 => sum_15
# sum_16 => sum_16
# sum_17 => sum_17
# sum_18 => sum_18
# sum_2 => sum_2
# sum_3 => sum_3
# sum_4 => sum_4
# sum_5 => sum_5
# sum_6 => sum_6
# sum_7 => sum_7
# sum_8 => sum_8
# sum_9 => sum_9
# tau_hi => sub_2
# tau_lo => sub_1
# tau_lo_1 => where
# tau_lo_10 => where_9
# tau_lo_11 => where_10
# tau_lo_12 => where_11
# tau_lo_13 => where_12
# tau_lo_14 => where_13
# tau_lo_15 => where_14
# tau_lo_16 => where_15
# tau_lo_17 => where_16
# tau_lo_2 => where_1
# tau_lo_3 => where_2
# tau_lo_4 => where_3
# tau_lo_5 => where_4
# tau_lo_6 => where_5
# tau_lo_7 => where_6
# tau_lo_8 => where_7
# tau_lo_9 => where_8
# tau_m => add
# tau_m_1 => add_1
# tau_m_10 => add_10
# tau_m_11 => add_11
# tau_m_12 => add_12
# tau_m_13 => add_13
# tau_m_14 => add_14
# tau_m_15 => add_15
# tau_m_16 => add_16
# tau_m_2 => add_2
# tau_m_3 => add_3
# tau_m_4 => add_4
# tau_m_5 => add_5
# tau_m_6 => add_6
# tau_m_7 => add_7
# tau_m_8 => add_8
# tau_m_9 => add_9
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 0.25), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %sub_1), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_5, 2), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_1, %div), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_6, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_1, [-1]), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %sub_4), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze, %add, %sub_1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %div_1), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_1), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_8, 0), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_2, [-1]), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %sub_4), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_1, %add_1, %where), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %div_2), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_2), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_3, [-1]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_4, 1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %sub_4), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %add_2, %where_1), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_2, %div_3), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_3), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_12, 0), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_4, [-1]), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %sub_4), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_3, %add_3, %where_2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_3, %div_4), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_4), kwargs = {})
# %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_5, [-1]), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_6, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_15, %sub_4), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_4, %add_4, %where_3), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %div_5), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_5), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_6, [-1]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, 1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %sub_4), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_5, %add_5, %where_4), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %div_6), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_6), kwargs = {})
# %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_18, 0), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_7, [-1]), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_8, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %sub_4), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_6, %add_6, %where_5), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %div_7), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_7), kwargs = {})
# %clamp_min_8 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_20, 0), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_8, [-1]), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, 1), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_21, %sub_4), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_7, %add_7, %where_6), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %div_8), kwargs = {})
# %sub_22 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_8), kwargs = {})
# %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_22, 0), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_9, [-1]), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_10, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %sub_4), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_8, %add_8, %where_7), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_8, %div_9), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_9), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_24, 0), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_10, [-1]), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, 1), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %sub_4), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_9, %add_9, %where_8), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_9, %div_10), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_10), kwargs = {})
# %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_26, 0), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_11, [-1]), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_12, 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %sub_4), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_10, %add_10, %where_9), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_10, %div_11), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_11), kwargs = {})
# %clamp_min_12 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_28, 0), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_12, [-1]), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, 1), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %sub_4), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_11, %add_11, %where_10), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_11, %div_12), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_12), kwargs = {})
# %clamp_min_13 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_13, [-1]), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_14, 1), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %sub_4), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_12, %add_12, %where_11), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_12, %div_13), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_13), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_32, 0), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_14, [-1]), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, 1), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_33, %sub_4), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_13, %add_13, %where_12), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_13, %div_14), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_14), kwargs = {})
# %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_34, 0), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_15, [-1]), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_16, 1), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %sub_4), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_14, %add_14, %where_13), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %div_15), kwargs = {})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_15), kwargs = {})
# %clamp_min_16 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_36, 0), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_16, [-1]), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_17, 1), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_37, %sub_4), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_15, %add_15, %where_14), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_15, %div_16), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_16), kwargs = {})
# %clamp_min_17 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_38, 0), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_17, [-1]), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_18, 1), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_39, %sub_4), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_16, %add_16, %where_15), kwargs = {})
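# Reading guide: each (tau_m_k, sub/clamp_min/sum, f_m_k, mul_k, where_k) group
# in the fragment above is one unrolled bisection step; tau_m replaces tau_lo
# whenever f(tau_m) * f_lo >= 0, and dm is halved between steps.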
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr8'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = 0.25
tmp10 = tmp6 - tmp9
tmp11 = tmp10 - tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = tmp0 - tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp1 - tmp14
tmp19 = triton_helpers.maximum(tmp18, tmp16)
tmp20 = tmp17 + tmp19
tmp21 = tmp3 - tmp14
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = tmp20 + tmp22
tmp24 = tmp5 - tmp14
tmp25 = triton_helpers.maximum(tmp24, tmp16)
tmp26 = tmp23 + tmp25
tmp27 = tmp26 - tmp7
tmp28 = tmp0 - tmp8
tmp29 = triton_helpers.maximum(tmp28, tmp16)
tmp30 = tmp1 - tmp8
tmp31 = triton_helpers.maximum(tmp30, tmp16)
tmp32 = tmp29 + tmp31
tmp33 = tmp3 - tmp8
tmp34 = triton_helpers.maximum(tmp33, tmp16)
tmp35 = tmp32 + tmp34
tmp36 = tmp5 - tmp8
tmp37 = triton_helpers.maximum(tmp36, tmp16)
tmp38 = tmp35 + tmp37
tmp39 = tmp38 - tmp7
tmp40 = tmp27 * tmp39
tmp41 = tmp40 >= tmp16
tmp42 = tl.where(tmp41, tmp14, tmp8)
tmp43 = tmp13 * tmp12
tmp44 = tmp42 + tmp43
tmp45 = tmp0 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp16)
tmp47 = tmp1 - tmp44
tmp48 = triton_helpers.maximum(tmp47, tmp16)
tmp49 = tmp46 + tmp48
tmp50 = tmp3 - tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp16)
tmp52 = tmp49 + tmp51
tmp53 = tmp5 - tmp44
tmp54 = triton_helpers.maximum(tmp53, tmp16)
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp7
tmp57 = tmp56 * tmp39
tmp58 = tmp57 >= tmp16
tmp59 = tl.where(tmp58, tmp44, tmp42)
tmp60 = tmp43 * tmp12
tmp61 = tmp59 + tmp60
tmp62 = tmp0 - tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp16)
tmp64 = tmp1 - tmp61
tmp65 = triton_helpers.maximum(tmp64, tmp16)
tmp66 = tmp63 + tmp65
tmp67 = tmp3 - tmp61
tmp68 = triton_helpers.maximum(tmp67, tmp16)
tmp69 = tmp66 + tmp68
tmp70 = tmp5 - tmp61
tmp71 = triton_helpers.maximum(tmp70, tmp16)
tmp72 = tmp69 + tmp71
tmp73 = tmp72 - tmp7
tmp74 = tmp73 * tmp39
tmp75 = tmp74 >= tmp16
tmp76 = tl.where(tmp75, tmp61, tmp59)
tmp77 = tmp60 * tmp12
tmp78 = tmp76 + tmp77
tmp79 = tmp0 - tmp78
tmp80 = triton_helpers.maximum(tmp79, tmp16)
tmp81 = tmp1 - tmp78
tmp82 = triton_helpers.maximum(tmp81, tmp16)
tmp83 = tmp80 + tmp82
tmp84 = tmp3 - tmp78
tmp85 = triton_helpers.maximum(tmp84, tmp16)
tmp86 = tmp83 + tmp85
tmp87 = tmp5 - tmp78
tmp88 = triton_helpers.maximum(tmp87, tmp16)
tmp89 = tmp86 + tmp88
tmp90 = tmp89 - tmp7
tmp91 = tmp90 * tmp39
tmp92 = tmp91 >= tmp16
tmp93 = tl.where(tmp92, tmp78, tmp76)
tmp94 = tmp77 * tmp12
tmp95 = tmp93 + tmp94
tmp96 = tmp0 - tmp95
tmp97 = triton_helpers.maximum(tmp96, tmp16)
tmp98 = tmp1 - tmp95
tmp99 = triton_helpers.maximum(tmp98, tmp16)
tmp100 = tmp97 + tmp99
tmp101 = tmp3 - tmp95
tmp102 = triton_helpers.maximum(tmp101, tmp16)
tmp103 = tmp100 + tmp102
tmp104 = tmp5 - tmp95
tmp105 = triton_helpers.maximum(tmp104, tmp16)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 - tmp7
tmp108 = tmp107 * tmp39
tmp109 = tmp108 >= tmp16
tmp110 = tl.where(tmp109, tmp95, tmp93)
tmp111 = tmp94 * tmp12
tmp112 = tmp110 + tmp111
tmp113 = tmp0 - tmp112
tmp114 = triton_helpers.maximum(tmp113, tmp16)
tmp115 = tmp1 - tmp112
tmp116 = triton_helpers.maximum(tmp115, tmp16)
tmp117 = tmp114 + tmp116
tmp118 = tmp3 - tmp112
tmp119 = triton_helpers.maximum(tmp118, tmp16)
tmp120 = tmp117 + tmp119
tmp121 = tmp5 - tmp112
tmp122 = triton_helpers.maximum(tmp121, tmp16)
tmp123 = tmp120 + tmp122
tmp124 = tmp123 - tmp7
tmp125 = tmp124 * tmp39
tmp126 = tmp125 >= tmp16
tmp127 = tl.where(tmp126, tmp112, tmp110)
tmp128 = tmp111 * tmp12
tmp129 = tmp127 + tmp128
tmp130 = tmp0 - tmp129
tmp131 = triton_helpers.maximum(tmp130, tmp16)
tmp132 = tmp1 - tmp129
tmp133 = triton_helpers.maximum(tmp132, tmp16)
tmp134 = tmp131 + tmp133
tmp135 = tmp3 - tmp129
tmp136 = triton_helpers.maximum(tmp135, tmp16)
tmp137 = tmp134 + tmp136
tmp138 = tmp5 - tmp129
tmp139 = triton_helpers.maximum(tmp138, tmp16)
tmp140 = tmp137 + tmp139
tmp141 = tmp140 - tmp7
tmp142 = tmp141 * tmp39
tmp143 = tmp142 >= tmp16
tmp144 = tl.where(tmp143, tmp129, tmp127)
tmp145 = tmp128 * tmp12
tmp146 = tmp144 + tmp145
tmp147 = tmp0 - tmp146
tmp148 = triton_helpers.maximum(tmp147, tmp16)
tmp149 = tmp1 - tmp146
tmp150 = triton_helpers.maximum(tmp149, tmp16)
tmp151 = tmp148 + tmp150
tmp152 = tmp3 - tmp146
tmp153 = triton_helpers.maximum(tmp152, tmp16)
tmp154 = tmp151 + tmp153
tmp155 = tmp5 - tmp146
tmp156 = triton_helpers.maximum(tmp155, tmp16)
tmp157 = tmp154 + tmp156
tmp158 = tmp157 - tmp7
tmp159 = tmp158 * tmp39
tmp160 = tmp159 >= tmp16
tmp161 = tl.where(tmp160, tmp146, tmp144)
tmp162 = tmp145 * tmp12
tmp163 = tmp161 + tmp162
tmp164 = tmp0 - tmp163
tmp165 = triton_helpers.maximum(tmp164, tmp16)
tmp166 = tmp1 - tmp163
tmp167 = triton_helpers.maximum(tmp166, tmp16)
tmp168 = tmp165 + tmp167
tmp169 = tmp3 - tmp163
tmp170 = triton_helpers.maximum(tmp169, tmp16)
tmp171 = tmp168 + tmp170
tmp172 = tmp5 - tmp163
tmp173 = triton_helpers.maximum(tmp172, tmp16)
tmp174 = tmp171 + tmp173
tmp175 = tmp174 - tmp7
tmp176 = tmp175 * tmp39
tmp177 = tmp176 >= tmp16
tmp178 = tl.where(tmp177, tmp163, tmp161)
tmp179 = tmp162 * tmp12
tmp180 = tmp178 + tmp179
tmp181 = tmp0 - tmp180
tmp182 = triton_helpers.maximum(tmp181, tmp16)
tmp183 = tmp1 - tmp180
tmp184 = triton_helpers.maximum(tmp183, tmp16)
tmp185 = tmp182 + tmp184
tmp186 = tmp3 - tmp180
tmp187 = triton_helpers.maximum(tmp186, tmp16)
tmp188 = tmp185 + tmp187
tmp189 = tmp5 - tmp180
tmp190 = triton_helpers.maximum(tmp189, tmp16)
tmp191 = tmp188 + tmp190
tmp192 = tmp191 - tmp7
tmp193 = tmp192 * tmp39
tmp194 = tmp193 >= tmp16
tmp195 = tl.where(tmp194, tmp180, tmp178)
tmp196 = tmp179 * tmp12
tmp197 = tmp195 + tmp196
tmp198 = tmp0 - tmp197
tmp199 = triton_helpers.maximum(tmp198, tmp16)
tmp200 = tmp1 - tmp197
tmp201 = triton_helpers.maximum(tmp200, tmp16)
tmp202 = tmp199 + tmp201
tmp203 = tmp3 - tmp197
tmp204 = triton_helpers.maximum(tmp203, tmp16)
tmp205 = tmp202 + tmp204
tmp206 = tmp5 - tmp197
tmp207 = triton_helpers.maximum(tmp206, tmp16)
tmp208 = tmp205 + tmp207
tmp209 = tmp208 - tmp7
tmp210 = tmp209 * tmp39
tmp211 = tmp210 >= tmp16
tmp212 = tl.where(tmp211, tmp197, tmp195)
tmp213 = tmp196 * tmp12
tmp214 = tmp212 + tmp213
tmp215 = tmp0 - tmp214
tmp216 = triton_helpers.maximum(tmp215, tmp16)
tmp217 = tmp1 - tmp214
tmp218 = triton_helpers.maximum(tmp217, tmp16)
tmp219 = tmp216 + tmp218
tmp220 = tmp3 - tmp214
tmp221 = triton_helpers.maximum(tmp220, tmp16)
tmp222 = tmp219 + tmp221
tmp223 = tmp5 - tmp214
tmp224 = triton_helpers.maximum(tmp223, tmp16)
tmp225 = tmp222 + tmp224
tmp226 = tmp225 - tmp7
tmp227 = tmp226 * tmp39
tmp228 = tmp227 >= tmp16
tmp229 = tl.where(tmp228, tmp214, tmp212)
tmp230 = tmp213 * tmp12
tmp231 = tmp229 + tmp230
tmp232 = tmp0 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp16)
tmp234 = tmp1 - tmp231
tmp235 = triton_helpers.maximum(tmp234, tmp16)
tmp236 = tmp233 + tmp235
tmp237 = tmp3 - tmp231
tmp238 = triton_helpers.maximum(tmp237, tmp16)
tmp239 = tmp236 + tmp238
tmp240 = tmp5 - tmp231
tmp241 = triton_helpers.maximum(tmp240, tmp16)
tmp242 = tmp239 + tmp241
tmp243 = tmp242 - tmp7
tmp244 = tmp243 * tmp39
tmp245 = tmp244 >= tmp16
tmp246 = tl.where(tmp245, tmp231, tmp229)
tmp247 = tmp230 * tmp12
tmp248 = tmp246 + tmp247
tmp249 = tmp0 - tmp248
tmp250 = triton_helpers.maximum(tmp249, tmp16)
tmp251 = tmp1 - tmp248
tmp252 = triton_helpers.maximum(tmp251, tmp16)
tmp253 = tmp250 + tmp252
tmp254 = tmp3 - tmp248
tmp255 = triton_helpers.maximum(tmp254, tmp16)
tmp256 = tmp253 + tmp255
tmp257 = tmp5 - tmp248
tmp258 = triton_helpers.maximum(tmp257, tmp16)
tmp259 = tmp256 + tmp258
tmp260 = tmp259 - tmp7
tmp261 = tmp260 * tmp39
tmp262 = tmp261 >= tmp16
tmp263 = tl.where(tmp262, tmp248, tmp246)
tmp264 = tmp247 * tmp12
tmp265 = tmp263 + tmp264
tmp266 = tmp0 - tmp265
tmp267 = triton_helpers.maximum(tmp266, tmp16)
tmp268 = tmp1 - tmp265
tmp269 = triton_helpers.maximum(tmp268, tmp16)
tmp270 = tmp267 + tmp269
tmp271 = tmp3 - tmp265
tmp272 = triton_helpers.maximum(tmp271, tmp16)
tmp273 = tmp270 + tmp272
tmp274 = tmp5 - tmp265
tmp275 = triton_helpers.maximum(tmp274, tmp16)
tmp276 = tmp273 + tmp275
tmp277 = tmp276 - tmp7
tmp278 = tmp277 * tmp39
tmp279 = tmp278 >= tmp16
tmp280 = tl.where(tmp279, tmp265, tmp263)
tmp281 = tmp264 * tmp12
tmp282 = tmp280 + tmp281
tmp283 = tmp0 - tmp282
tmp284 = triton_helpers.maximum(tmp283, tmp16)
tmp285 = tmp1 - tmp282
tmp286 = triton_helpers.maximum(tmp285, tmp16)
tmp287 = tmp284 + tmp286
tmp288 = tmp3 - tmp282
tmp289 = triton_helpers.maximum(tmp288, tmp16)
tmp290 = tmp287 + tmp289
tmp291 = tmp5 - tmp282
tmp292 = triton_helpers.maximum(tmp291, tmp16)
tmp293 = tmp290 + tmp292
tmp294 = tmp293 - tmp7
tmp295 = tmp294 * tmp39
tmp296 = tmp295 >= tmp16
tmp297 = tl.where(tmp296, tmp282, tmp280)
tmp298 = tmp281 * tmp12
tmp299 = tmp297 + tmp298
tmp300 = tmp0 - tmp299
tmp301 = triton_helpers.maximum(tmp300, tmp16)
tmp302 = tmp1 - tmp299
tmp303 = triton_helpers.maximum(tmp302, tmp16)
tmp304 = tmp301 + tmp303
tmp305 = tmp3 - tmp299
tmp306 = triton_helpers.maximum(tmp305, tmp16)
tmp307 = tmp304 + tmp306
tmp308 = tmp5 - tmp299
tmp309 = triton_helpers.maximum(tmp308, tmp16)
tmp310 = tmp307 + tmp309
tmp311 = tmp310 - tmp7
tmp312 = tmp311 * tmp39
tmp313 = tmp312 >= tmp16
tmp314 = tl.where(tmp313, tmp299, tmp297)
tl.store(in_out_ptr8 + (x0), tmp314, xmask)
''', device_str='cuda')
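# Reference sketch of the bisection the kernel above unrolls (plain PyTorch,
# for readability; the function name and n_iter are assumptions -- entmax-style
# bisection typically runs ~50 halvings, split here across two fused kernels):
def sparsemax_bisect_ref(x, n_iter=50):
    max_val, _ = x.max(dim=-1, keepdim=True)
    tau_lo = max_val - 1.0               # f(tau_lo) >= 0
    tau_hi = max_val - 1.0 / x.size(-1)  # f(tau_hi) <= 0 (max - 0.25 when d = 4)
    f = lambda tau: torch.clamp(x - tau, min=0).sum(-1, keepdim=True) - 1.0
    f_lo = f(tau_lo)
    dm = tau_hi - tau_lo
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        # keep the midpoint as the new lower bound while the sign test holds
        tau_lo = torch.where(f(tau_m) * f_lo >= 0, tau_m, tau_lo)
    return torch.clamp(x - tau_lo, min=0)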
# kernel path: runs/run_shard_4/inductor_cache/av/cavd5cj3tg3vokgvnwac6qqbcheawux6j46atz5eq3ovasf7jkxk.py
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, sub_3, clamp, sum_1, f_lo, dm_2, dm_3, dm_4, dm_5, dm_6, dm_7, dm_8, dm_9, dm_10, dm_11, dm_12, dm_13, dm_14, dm_15, dm_16, dm_17, dm_18, tau_m_17, sub_40, p_m_17, sum_19, f_m_17, mul_18, tau_lo_18, dm_19, tau_m_18, sub_42, p_m_18, sum_20, f_m_18, mul_19, tau_lo_19, dm_20, tau_m_19, sub_44, p_m_19, sum_21, f_m_19, tau_lo_20, dm_21, tau_m_20, sub_46, p_m_20, sum_22, f_m_20, mul_21, tau_lo_21, dm_22, tau_m_21, sub_48, p_m_21, sum_23, f_m_21, mul_22, tau_lo_22, dm_23, tau_m_22, sub_50, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_52, p_m_23, sum_25, f_m_23, mul_24, tau_lo_24, dm_25, tau_m_24, sub_54, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_56, p_m_25, sum_27, f_m_25, mul_26, tau_lo_26, dm_27, tau_m_26, sub_58, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_60, p_m_27, sum_29, f_m_27, tau_lo_28, dm_29, tau_m_28, sub_62, p_m_28, sum_30, tau_lo_29, dm_30, tau_m_29, sub_64, p_m_29, sum_31, tau_lo_30, dm_31, tau_m_30, sub_66, p_m_30, sum_32, tau_lo_31, dm_32, tau_m_31, sub_68, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_70, p_m_32, sum_34, tau_lo_33, dm_34, tau_m_33, sub_72, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_74, p_m_34, sum_36, tau_lo_35, dm_36, tau_m_35, sub_76, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_78, p_m_36, sum_38, tau_lo_37, dm_38, tau_m_37, sub_80, p_m_37, sum_39, tau_lo_38, dm_39, tau_m_38, sub_82, p_m_38, sum_40, tau_lo_39, dm_40, tau_m_39, sub_84, p_m_39, sum_41, tau_lo_40], Original ATen: [aten.max, aten.sub, aten.div, aten.clamp, aten.sum, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm => sub_5
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_2 => div_1
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_3 => div_2
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_4 => div_3
# dm_40 => div_39
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_4
# f_m_17 => sub_41
# f_m_18 => sub_43
# f_m_19 => sub_45
# f_m_20 => sub_47
# f_m_21 => sub_49
# f_m_23 => sub_53
# f_m_25 => sub_57
# f_m_27 => sub_61
# max_1 => max_1
# mul_18 => mul_18
# mul_19 => mul_19
# mul_21 => mul_21
# mul_22 => mul_22
# mul_24 => mul_24
# mul_26 => mul_26
# p_m_17 => clamp_min_18
# p_m_18 => clamp_min_19
# p_m_19 => clamp_min_20
# p_m_20 => clamp_min_21
# p_m_21 => clamp_min_22
# p_m_22 => clamp_min_23
# p_m_23 => clamp_min_24
# p_m_24 => clamp_min_25
# p_m_25 => clamp_min_26
# p_m_26 => clamp_min_27
# p_m_27 => clamp_min_28
# p_m_28 => clamp_min_29
# p_m_29 => clamp_min_30
# p_m_30 => clamp_min_31
# p_m_31 => clamp_min_32
# p_m_32 => clamp_min_33
# p_m_33 => clamp_min_34
# p_m_34 => clamp_min_35
# p_m_35 => clamp_min_36
# p_m_36 => clamp_min_37
# p_m_37 => clamp_min_38
# p_m_38 => clamp_min_39
# p_m_39 => clamp_min_40
# sub_3 => sub_3
# sub_40 => sub_40
# sub_42 => sub_42
# sub_44 => sub_44
# sub_46 => sub_46
# sub_48 => sub_48
# sub_50 => sub_50
# sub_52 => sub_52
# sub_54 => sub_54
# sub_56 => sub_56
# sub_58 => sub_58
# sub_60 => sub_60
# sub_62 => sub_62
# sub_64 => sub_64
# sub_66 => sub_66
# sub_68 => sub_68
# sub_70 => sub_70
# sub_72 => sub_72
# sub_74 => sub_74
# sub_76 => sub_76
# sub_78 => sub_78
# sub_80 => sub_80
# sub_82 => sub_82
# sub_84 => sub_84
# sum_1 => sum_1
# sum_19 => sum_19
# sum_20 => sum_20
# sum_21 => sum_21
# sum_22 => sum_22
# sum_23 => sum_23
# sum_24 => sum_24
# sum_25 => sum_25
# sum_26 => sum_26
# sum_27 => sum_27
# sum_28 => sum_28
# sum_29 => sum_29
# sum_30 => sum_30
# sum_31 => sum_31
# sum_32 => sum_32
# sum_33 => sum_33
# sum_34 => sum_34
# sum_35 => sum_35
# sum_36 => sum_36
# sum_37 => sum_37
# sum_38 => sum_38
# sum_39 => sum_39
# sum_40 => sum_40
# sum_41 => sum_41
# tau_hi => sub_2
# tau_lo => sub_1
# tau_lo_18 => where_17
# tau_lo_19 => where_18
# tau_lo_20 => where_19
# tau_lo_21 => where_20
# tau_lo_22 => where_21
# tau_lo_23 => where_22
# tau_lo_24 => where_23
# tau_lo_25 => where_24
# tau_lo_26 => where_25
# tau_lo_27 => where_26
# tau_lo_28 => where_27
# tau_lo_29 => where_28
# tau_lo_30 => where_29
# tau_lo_31 => where_30
# tau_lo_32 => where_31
# tau_lo_33 => where_32
# tau_lo_34 => where_33
# tau_lo_35 => where_34
# tau_lo_36 => where_35
# tau_lo_37 => where_36
# tau_lo_38 => where_37
# tau_lo_39 => where_38
# tau_lo_40 => where_39
# tau_m_17 => add_17
# tau_m_18 => add_18
# tau_m_19 => add_19
# tau_m_20 => add_20
# tau_m_21 => add_21
# tau_m_22 => add_22
# tau_m_23 => add_23
# tau_m_24 => add_24
# tau_m_25 => add_25
# tau_m_26 => add_26
# tau_m_27 => add_27
# tau_m_28 => add_28
# tau_m_29 => add_29
# tau_m_30 => add_30
# tau_m_31 => add_31
# tau_m_32 => add_32
# tau_m_33 => add_33
# tau_m_34 => add_34
# tau_m_35 => add_35
# tau_m_36 => add_36
# tau_m_37 => add_37
# tau_m_38 => add_38
# tau_m_39 => add_39
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 0.25), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_2, %sub_1), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_5, 2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_16, %div_17), kwargs = {})
# %sub_40 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_17), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_40, 0), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_18, [-1]), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_19, 1), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %sub_4), kwargs = {})
# %where_17 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_17, %add_17, %where_16), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_17, %div_18), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_18), kwargs = {})
# %clamp_min_19 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_42, 0), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_19, [-1]), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_20, 1), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_43, %sub_4), kwargs = {})
# %where_18 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_18, %add_18, %where_17), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_18, %div_19), kwargs = {})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_19), kwargs = {})
# %clamp_min_20 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_44, 0), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_20, [-1]), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_21, 1), kwargs = {})
# %where_19 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_19, %add_19, %where_18), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_19, %div_20), kwargs = {})
# %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_20), kwargs = {})
# %clamp_min_21 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_46, 0), kwargs = {})
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_21, [-1]), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_22, 1), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %sub_4), kwargs = {})
# %where_20 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_20, %add_20, %where_19), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_20, %div_21), kwargs = {})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_21), kwargs = {})
# %clamp_min_22 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_48, 0), kwargs = {})
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_22, [-1]), kwargs = {})
# %sub_49 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_23, 1), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_49, %sub_4), kwargs = {})
# %where_21 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_21, %add_21, %where_20), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_21, %div_22), kwargs = {})
# %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_22), kwargs = {})
# %clamp_min_23 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_50, 0), kwargs = {})
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_23, [-1]), kwargs = {})
# %where_22 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_22, %add_22, %where_21), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_22, %div_23), kwargs = {})
# %sub_52 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_23), kwargs = {})
# %clamp_min_24 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_52, 0), kwargs = {})
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_24, [-1]), kwargs = {})
# %sub_53 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_25, 1), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_53, %sub_4), kwargs = {})
# %where_23 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_23, %add_23, %where_22), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_23, %div_24), kwargs = {})
# %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_24), kwargs = {})
# %clamp_min_25 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_54, 0), kwargs = {})
# %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_25, [-1]), kwargs = {})
# %where_24 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_24, %add_24, %where_23), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {})
# %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_25), kwargs = {})
# %clamp_min_26 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_56, 0), kwargs = {})
# %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_26, [-1]), kwargs = {})
# %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_27, 1), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_57, %sub_4), kwargs = {})
# %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {})
# %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_26), kwargs = {})
# %clamp_min_27 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_58, 0), kwargs = {})
# %sum_28 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_27, [-1]), kwargs = {})
# %where_26 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_26, %add_26, %where_25), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {})
# %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_27), kwargs = {})
# %clamp_min_28 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_60, 0), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_28, [-1]), kwargs = {})
# %sub_61 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_29, 1), kwargs = {})
# %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {})
# %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_28), kwargs = {})
# %clamp_min_29 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_62, 0), kwargs = {})
# %sum_30 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_29, [-1]), kwargs = {})
# %where_28 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_28, %add_28, %where_27), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {})
# %sub_64 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_29), kwargs = {})
# %clamp_min_30 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_64, 0), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_30, [-1]), kwargs = {})
# %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {})
# %sub_66 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_30), kwargs = {})
# %clamp_min_31 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_66, 0), kwargs = {})
# %sum_32 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_31, [-1]), kwargs = {})
# %where_30 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_30, %add_30, %where_29), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {})
# %sub_68 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_31), kwargs = {})
# %clamp_min_32 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_68, 0), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_32, [-1]), kwargs = {})
# %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {})
# %sub_70 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_32), kwargs = {})
# %clamp_min_33 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_70, 0), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_33, [-1]), kwargs = {})
# %where_32 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_32, %add_32, %where_31), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_32, %div_33), kwargs = {})
# %sub_72 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_33), kwargs = {})
# %clamp_min_34 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_72, 0), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_34, [-1]), kwargs = {})
# %where_33 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_33, %add_33, %where_32), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {})
# %sub_74 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_34), kwargs = {})
# %clamp_min_35 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_74, 0), kwargs = {})
# %sum_36 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_35, [-1]), kwargs = {})
# %where_34 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_34, %add_34, %where_33), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {})
# %sub_76 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_35), kwargs = {})
# %clamp_min_36 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_76, 0), kwargs = {})
# %sum_37 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_36, [-1]), kwargs = {})
# %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_35, %add_35, %where_34), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {})
# %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_36), kwargs = {})
# %clamp_min_37 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_78, 0), kwargs = {})
# %sum_38 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_37, [-1]), kwargs = {})
# %where_36 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_36, %add_36, %where_35), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
# %sub_80 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_37), kwargs = {})
# %clamp_min_38 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_80, 0), kwargs = {})
# %sum_39 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_38, [-1]), kwargs = {})
# %where_37 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_37, %add_37, %where_36), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
# %sub_82 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_38), kwargs = {})
# %clamp_min_39 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_82, 0), kwargs = {})
# %sum_40 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_39, [-1]), kwargs = {})
# %where_38 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_38, %add_38, %where_37), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
# %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_39), kwargs = {})
# %clamp_min_40 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_84, 0), kwargs = {})
# %sum_41 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_40, [-1]), kwargs = {})
# %where_39 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_39, %add_39, %where_38), kwargs = {})
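# Reader note (not Inductor output): the fragment above is one contiguous run of
# the bisection search. Each iteration halves the step (%div_k), probes
# tau_m = tau_lo + dm (%add_k), evaluates f(tau_m) = sum(clamp(x - tau_m, 0)) - 1
# (%clamp_min_k, %sum_k, %sub_k), and keeps the probe as the new lower bound
# whenever f(tau_m) has the same sign as f_lo (%where_k).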
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1', 'mutated_arg_names': ['in_out_ptr16'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.25
tmp8 = tmp6 - tmp7
tmp9 = 1.0
tmp10 = tmp6 - tmp9
tmp11 = tmp8 - tmp10
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp12
tmp15 = tmp14 * tmp12
tmp16 = tmp15 * tmp12
tmp17 = tmp16 * tmp12
tmp18 = tmp17 * tmp12
tmp19 = tmp18 * tmp12
tmp20 = tmp19 * tmp12
tmp21 = tmp20 * tmp12
tmp22 = tmp21 * tmp12
tmp23 = tmp22 * tmp12
tmp24 = tmp23 * tmp12
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp12
tmp27 = tmp26 * tmp12
tmp28 = tmp27 * tmp12
tmp29 = tmp28 * tmp12
tmp30 = tmp29 * tmp12
tmp32 = tmp31 + tmp30
tmp33 = tmp0 - tmp32
tmp34 = 0.0
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp1 - tmp32
tmp37 = triton_helpers.maximum(tmp36, tmp34)
tmp38 = tmp35 + tmp37
tmp39 = tmp3 - tmp32
tmp40 = triton_helpers.maximum(tmp39, tmp34)
tmp41 = tmp38 + tmp40
tmp42 = tmp5 - tmp32
tmp43 = triton_helpers.maximum(tmp42, tmp34)
tmp44 = tmp41 + tmp43
tmp45 = tmp44 - tmp9
tmp46 = tmp0 - tmp10
tmp47 = triton_helpers.maximum(tmp46, tmp34)
tmp48 = tmp1 - tmp10
tmp49 = triton_helpers.maximum(tmp48, tmp34)
tmp50 = tmp47 + tmp49
tmp51 = tmp3 - tmp10
tmp52 = triton_helpers.maximum(tmp51, tmp34)
tmp53 = tmp50 + tmp52
tmp54 = tmp5 - tmp10
tmp55 = triton_helpers.maximum(tmp54, tmp34)
tmp56 = tmp53 + tmp55
tmp57 = tmp56 - tmp9
tmp58 = tmp45 * tmp57
tmp59 = tmp58 >= tmp34
tmp60 = tl.where(tmp59, tmp32, tmp31)
tmp61 = tmp30 * tmp12
tmp62 = tmp60 + tmp61
tmp63 = tmp0 - tmp62
tmp64 = triton_helpers.maximum(tmp63, tmp34)
tmp65 = tmp1 - tmp62
tmp66 = triton_helpers.maximum(tmp65, tmp34)
tmp67 = tmp64 + tmp66
tmp68 = tmp3 - tmp62
tmp69 = triton_helpers.maximum(tmp68, tmp34)
tmp70 = tmp67 + tmp69
tmp71 = tmp5 - tmp62
tmp72 = triton_helpers.maximum(tmp71, tmp34)
tmp73 = tmp70 + tmp72
tmp74 = tmp73 - tmp9
tmp75 = tmp74 * tmp57
tmp76 = tmp75 >= tmp34
tmp77 = tl.where(tmp76, tmp62, tmp60)
tmp78 = tmp61 * tmp12
tmp79 = tmp77 + tmp78
tmp80 = tmp0 - tmp79
tmp81 = triton_helpers.maximum(tmp80, tmp34)
tmp82 = tmp1 - tmp79
tmp83 = triton_helpers.maximum(tmp82, tmp34)
tmp84 = tmp81 + tmp83
tmp85 = tmp3 - tmp79
tmp86 = triton_helpers.maximum(tmp85, tmp34)
tmp87 = tmp84 + tmp86
tmp88 = tmp5 - tmp79
tmp89 = triton_helpers.maximum(tmp88, tmp34)
tmp90 = tmp87 + tmp89
tmp91 = tmp90 - tmp9
tmp92 = tmp91 * tmp57
tmp93 = tmp92 >= tmp34
tmp94 = tl.where(tmp93, tmp79, tmp77)
tmp95 = tmp78 * tmp12
tmp96 = tmp94 + tmp95
tmp97 = tmp0 - tmp96
tmp98 = triton_helpers.maximum(tmp97, tmp34)
tmp99 = tmp1 - tmp96
tmp100 = triton_helpers.maximum(tmp99, tmp34)
tmp101 = tmp98 + tmp100
tmp102 = tmp3 - tmp96
tmp103 = triton_helpers.maximum(tmp102, tmp34)
tmp104 = tmp101 + tmp103
tmp105 = tmp5 - tmp96
tmp106 = triton_helpers.maximum(tmp105, tmp34)
tmp107 = tmp104 + tmp106
tmp108 = tmp107 - tmp9
tmp109 = tmp108 * tmp57
tmp110 = tmp109 >= tmp34
tmp111 = tl.where(tmp110, tmp96, tmp94)
tmp112 = tmp95 * tmp12
tmp113 = tmp111 + tmp112
tmp114 = tmp0 - tmp113
tmp115 = triton_helpers.maximum(tmp114, tmp34)
tmp116 = tmp1 - tmp113
tmp117 = triton_helpers.maximum(tmp116, tmp34)
tmp118 = tmp115 + tmp117
tmp119 = tmp3 - tmp113
tmp120 = triton_helpers.maximum(tmp119, tmp34)
tmp121 = tmp118 + tmp120
tmp122 = tmp5 - tmp113
tmp123 = triton_helpers.maximum(tmp122, tmp34)
tmp124 = tmp121 + tmp123
tmp125 = tmp124 - tmp9
tmp126 = tmp125 * tmp57
tmp127 = tmp126 >= tmp34
tmp128 = tl.where(tmp127, tmp113, tmp111)
tmp129 = tmp112 * tmp12
tmp130 = tmp128 + tmp129
tmp131 = tmp0 - tmp130
tmp132 = triton_helpers.maximum(tmp131, tmp34)
tmp133 = tmp1 - tmp130
tmp134 = triton_helpers.maximum(tmp133, tmp34)
tmp135 = tmp132 + tmp134
tmp136 = tmp3 - tmp130
tmp137 = triton_helpers.maximum(tmp136, tmp34)
tmp138 = tmp135 + tmp137
tmp139 = tmp5 - tmp130
tmp140 = triton_helpers.maximum(tmp139, tmp34)
tmp141 = tmp138 + tmp140
tmp142 = tmp141 - tmp9
tmp143 = tmp142 * tmp57
tmp144 = tmp143 >= tmp34
tmp145 = tl.where(tmp144, tmp130, tmp128)
tmp146 = tmp129 * tmp12
tmp147 = tmp145 + tmp146
tmp148 = tmp0 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp34)
tmp150 = tmp1 - tmp147
tmp151 = triton_helpers.maximum(tmp150, tmp34)
tmp152 = tmp149 + tmp151
tmp153 = tmp3 - tmp147
tmp154 = triton_helpers.maximum(tmp153, tmp34)
tmp155 = tmp152 + tmp154
tmp156 = tmp5 - tmp147
tmp157 = triton_helpers.maximum(tmp156, tmp34)
tmp158 = tmp155 + tmp157
tmp159 = tmp158 - tmp9
tmp160 = tmp159 * tmp57
tmp161 = tmp160 >= tmp34
tmp162 = tl.where(tmp161, tmp147, tmp145)
tmp163 = tmp146 * tmp12
tmp164 = tmp162 + tmp163
tmp165 = tmp0 - tmp164
tmp166 = triton_helpers.maximum(tmp165, tmp34)
tmp167 = tmp1 - tmp164
tmp168 = triton_helpers.maximum(tmp167, tmp34)
tmp169 = tmp166 + tmp168
tmp170 = tmp3 - tmp164
tmp171 = triton_helpers.maximum(tmp170, tmp34)
tmp172 = tmp169 + tmp171
tmp173 = tmp5 - tmp164
tmp174 = triton_helpers.maximum(tmp173, tmp34)
tmp175 = tmp172 + tmp174
tmp176 = tmp175 - tmp9
tmp177 = tmp176 * tmp57
tmp178 = tmp177 >= tmp34
tmp179 = tl.where(tmp178, tmp164, tmp162)
tmp180 = tmp163 * tmp12
tmp181 = tmp179 + tmp180
tmp182 = tmp0 - tmp181
tmp183 = triton_helpers.maximum(tmp182, tmp34)
tmp184 = tmp1 - tmp181
tmp185 = triton_helpers.maximum(tmp184, tmp34)
tmp186 = tmp183 + tmp185
tmp187 = tmp3 - tmp181
tmp188 = triton_helpers.maximum(tmp187, tmp34)
tmp189 = tmp186 + tmp188
tmp190 = tmp5 - tmp181
tmp191 = triton_helpers.maximum(tmp190, tmp34)
tmp192 = tmp189 + tmp191
tmp193 = tmp192 - tmp9
tmp194 = tmp193 * tmp57
tmp195 = tmp194 >= tmp34
tmp196 = tl.where(tmp195, tmp181, tmp179)
tmp197 = tmp180 * tmp12
tmp198 = tmp196 + tmp197
tmp199 = tmp0 - tmp198
tmp200 = triton_helpers.maximum(tmp199, tmp34)
tmp201 = tmp1 - tmp198
tmp202 = triton_helpers.maximum(tmp201, tmp34)
tmp203 = tmp200 + tmp202
tmp204 = tmp3 - tmp198
tmp205 = triton_helpers.maximum(tmp204, tmp34)
tmp206 = tmp203 + tmp205
tmp207 = tmp5 - tmp198
tmp208 = triton_helpers.maximum(tmp207, tmp34)
tmp209 = tmp206 + tmp208
tmp210 = tmp209 - tmp9
tmp211 = tmp210 * tmp57
tmp212 = tmp211 >= tmp34
tmp213 = tl.where(tmp212, tmp198, tmp196)
tmp214 = tmp197 * tmp12
tmp215 = tmp213 + tmp214
tmp216 = tmp0 - tmp215
tmp217 = triton_helpers.maximum(tmp216, tmp34)
tmp218 = tmp1 - tmp215
tmp219 = triton_helpers.maximum(tmp218, tmp34)
tmp220 = tmp217 + tmp219
tmp221 = tmp3 - tmp215
tmp222 = triton_helpers.maximum(tmp221, tmp34)
tmp223 = tmp220 + tmp222
tmp224 = tmp5 - tmp215
tmp225 = triton_helpers.maximum(tmp224, tmp34)
tmp226 = tmp223 + tmp225
tmp227 = tmp226 - tmp9
tmp228 = tmp227 * tmp57
tmp229 = tmp228 >= tmp34
tmp230 = tl.where(tmp229, tmp215, tmp213)
tmp231 = tmp214 * tmp12
tmp232 = tmp230 + tmp231
tmp233 = tmp0 - tmp232
tmp234 = triton_helpers.maximum(tmp233, tmp34)
tmp235 = tmp1 - tmp232
tmp236 = triton_helpers.maximum(tmp235, tmp34)
tmp237 = tmp234 + tmp236
tmp238 = tmp3 - tmp232
tmp239 = triton_helpers.maximum(tmp238, tmp34)
tmp240 = tmp237 + tmp239
tmp241 = tmp5 - tmp232
tmp242 = triton_helpers.maximum(tmp241, tmp34)
tmp243 = tmp240 + tmp242
tmp244 = tmp243 - tmp9
tmp245 = tmp244 * tmp57
tmp246 = tmp245 >= tmp34
tmp247 = tl.where(tmp246, tmp232, tmp230)
tmp248 = tmp231 * tmp12
tmp249 = tmp247 + tmp248
tmp250 = tmp0 - tmp249
tmp251 = triton_helpers.maximum(tmp250, tmp34)
tmp252 = tmp1 - tmp249
tmp253 = triton_helpers.maximum(tmp252, tmp34)
tmp254 = tmp251 + tmp253
tmp255 = tmp3 - tmp249
tmp256 = triton_helpers.maximum(tmp255, tmp34)
tmp257 = tmp254 + tmp256
tmp258 = tmp5 - tmp249
tmp259 = triton_helpers.maximum(tmp258, tmp34)
tmp260 = tmp257 + tmp259
tmp261 = tmp260 - tmp9
tmp262 = tmp261 * tmp57
tmp263 = tmp262 >= tmp34
tmp264 = tl.where(tmp263, tmp249, tmp247)
tmp265 = tmp248 * tmp12
tmp266 = tmp264 + tmp265
tmp267 = tmp0 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp34)
tmp269 = tmp1 - tmp266
tmp270 = triton_helpers.maximum(tmp269, tmp34)
tmp271 = tmp268 + tmp270
tmp272 = tmp3 - tmp266
tmp273 = triton_helpers.maximum(tmp272, tmp34)
tmp274 = tmp271 + tmp273
tmp275 = tmp5 - tmp266
tmp276 = triton_helpers.maximum(tmp275, tmp34)
tmp277 = tmp274 + tmp276
tmp278 = tmp277 - tmp9
tmp279 = tmp278 * tmp57
tmp280 = tmp279 >= tmp34
tmp281 = tl.where(tmp280, tmp266, tmp264)
tmp282 = tmp265 * tmp12
tmp283 = tmp281 + tmp282
tmp284 = tmp0 - tmp283
tmp285 = triton_helpers.maximum(tmp284, tmp34)
tmp286 = tmp1 - tmp283
tmp287 = triton_helpers.maximum(tmp286, tmp34)
tmp288 = tmp285 + tmp287
tmp289 = tmp3 - tmp283
tmp290 = triton_helpers.maximum(tmp289, tmp34)
tmp291 = tmp288 + tmp290
tmp292 = tmp5 - tmp283
tmp293 = triton_helpers.maximum(tmp292, tmp34)
tmp294 = tmp291 + tmp293
tmp295 = tmp294 - tmp9
tmp296 = tmp295 * tmp57
tmp297 = tmp296 >= tmp34
tmp298 = tl.where(tmp297, tmp283, tmp281)
tmp299 = tmp282 * tmp12
tmp300 = tmp298 + tmp299
tmp301 = tmp0 - tmp300
tmp302 = triton_helpers.maximum(tmp301, tmp34)
tmp303 = tmp1 - tmp300
tmp304 = triton_helpers.maximum(tmp303, tmp34)
tmp305 = tmp302 + tmp304
tmp306 = tmp3 - tmp300
tmp307 = triton_helpers.maximum(tmp306, tmp34)
tmp308 = tmp305 + tmp307
tmp309 = tmp5 - tmp300
tmp310 = triton_helpers.maximum(tmp309, tmp34)
tmp311 = tmp308 + tmp310
tmp312 = tmp311 - tmp9
tmp313 = tmp312 * tmp57
tmp314 = tmp313 >= tmp34
tmp315 = tl.where(tmp314, tmp300, tmp298)
tmp316 = tmp299 * tmp12
tmp317 = tmp315 + tmp316
tmp318 = tmp0 - tmp317
tmp319 = triton_helpers.maximum(tmp318, tmp34)
tmp320 = tmp1 - tmp317
tmp321 = triton_helpers.maximum(tmp320, tmp34)
tmp322 = tmp319 + tmp321
tmp323 = tmp3 - tmp317
tmp324 = triton_helpers.maximum(tmp323, tmp34)
tmp325 = tmp322 + tmp324
tmp326 = tmp5 - tmp317
tmp327 = triton_helpers.maximum(tmp326, tmp34)
tmp328 = tmp325 + tmp327
tmp329 = tmp328 - tmp9
tmp330 = tmp329 * tmp57
tmp331 = tmp330 >= tmp34
tmp332 = tl.where(tmp331, tmp317, tmp315)
tmp333 = tmp316 * tmp12
tmp334 = tmp332 + tmp333
tmp335 = tmp0 - tmp334
tmp336 = triton_helpers.maximum(tmp335, tmp34)
tmp337 = tmp1 - tmp334
tmp338 = triton_helpers.maximum(tmp337, tmp34)
tmp339 = tmp336 + tmp338
tmp340 = tmp3 - tmp334
tmp341 = triton_helpers.maximum(tmp340, tmp34)
tmp342 = tmp339 + tmp341
tmp343 = tmp5 - tmp334
tmp344 = triton_helpers.maximum(tmp343, tmp34)
tmp345 = tmp342 + tmp344
tmp346 = tmp345 - tmp9
tmp347 = tmp346 * tmp57
tmp348 = tmp347 >= tmp34
tmp349 = tl.where(tmp348, tmp334, tmp332)
tmp350 = tmp333 * tmp12
tmp351 = tmp349 + tmp350
tmp352 = tmp0 - tmp351
tmp353 = triton_helpers.maximum(tmp352, tmp34)
tmp354 = tmp1 - tmp351
tmp355 = triton_helpers.maximum(tmp354, tmp34)
tmp356 = tmp353 + tmp355
tmp357 = tmp3 - tmp351
tmp358 = triton_helpers.maximum(tmp357, tmp34)
tmp359 = tmp356 + tmp358
tmp360 = tmp5 - tmp351
tmp361 = triton_helpers.maximum(tmp360, tmp34)
tmp362 = tmp359 + tmp361
tmp363 = tmp362 - tmp9
tmp364 = tmp363 * tmp57
tmp365 = tmp364 >= tmp34
tmp366 = tl.where(tmp365, tmp351, tmp349)
tmp367 = tmp350 * tmp12
tmp368 = tmp366 + tmp367
tmp369 = tmp0 - tmp368
tmp370 = triton_helpers.maximum(tmp369, tmp34)
tmp371 = tmp1 - tmp368
tmp372 = triton_helpers.maximum(tmp371, tmp34)
tmp373 = tmp370 + tmp372
tmp374 = tmp3 - tmp368
tmp375 = triton_helpers.maximum(tmp374, tmp34)
tmp376 = tmp373 + tmp375
tmp377 = tmp5 - tmp368
tmp378 = triton_helpers.maximum(tmp377, tmp34)
tmp379 = tmp376 + tmp378
tmp380 = tmp379 - tmp9
tmp381 = tmp380 * tmp57
tmp382 = tmp381 >= tmp34
tmp383 = tl.where(tmp382, tmp368, tmp366)
tmp384 = tmp367 * tmp12
tmp385 = tmp383 + tmp384
tmp386 = tmp0 - tmp385
tmp387 = triton_helpers.maximum(tmp386, tmp34)
tmp388 = tmp1 - tmp385
tmp389 = triton_helpers.maximum(tmp388, tmp34)
tmp390 = tmp387 + tmp389
tmp391 = tmp3 - tmp385
tmp392 = triton_helpers.maximum(tmp391, tmp34)
tmp393 = tmp390 + tmp392
tmp394 = tmp5 - tmp385
tmp395 = triton_helpers.maximum(tmp394, tmp34)
tmp396 = tmp393 + tmp395
tmp397 = tmp396 - tmp9
tmp398 = tmp397 * tmp57
tmp399 = tmp398 >= tmp34
tmp400 = tl.where(tmp399, tmp385, tmp383)
tmp401 = tmp384 * tmp12
tmp402 = tmp400 + tmp401
tmp403 = tmp0 - tmp402
tmp404 = triton_helpers.maximum(tmp403, tmp34)
tmp405 = tmp1 - tmp402
tmp406 = triton_helpers.maximum(tmp405, tmp34)
tmp407 = tmp404 + tmp406
tmp408 = tmp3 - tmp402
tmp409 = triton_helpers.maximum(tmp408, tmp34)
tmp410 = tmp407 + tmp409
tmp411 = tmp5 - tmp402
tmp412 = triton_helpers.maximum(tmp411, tmp34)
tmp413 = tmp410 + tmp412
tmp414 = tmp413 - tmp9
tmp415 = tmp414 * tmp57
tmp416 = tmp415 >= tmp34
tmp417 = tl.where(tmp416, tmp402, tmp400)
tmp418 = tmp401 * tmp12
tmp419 = tmp417 + tmp418
tmp420 = tmp0 - tmp419
tmp421 = triton_helpers.maximum(tmp420, tmp34)
tmp422 = tmp1 - tmp419
tmp423 = triton_helpers.maximum(tmp422, tmp34)
tmp424 = tmp421 + tmp423
tmp425 = tmp3 - tmp419
tmp426 = triton_helpers.maximum(tmp425, tmp34)
tmp427 = tmp424 + tmp426
tmp428 = tmp5 - tmp419
tmp429 = triton_helpers.maximum(tmp428, tmp34)
tmp430 = tmp427 + tmp429
tmp431 = tmp430 - tmp9
tmp432 = tmp431 * tmp57
tmp433 = tmp432 >= tmp34
tmp434 = tl.where(tmp433, tmp419, tmp417)
tl.store(out_ptr0 + (x0), tmp30, xmask)
tl.store(in_out_ptr16 + (x0), tmp434, xmask)
''', device_str='cuda')
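# Reader note (not Inductor output): the fused kernel above unrolls 23 bisection
# iterations (tau_m_17 .. tau_m_39) of the sparsemax-style threshold search in
# registers, carrying tau_lo forward through the tl.where chain, writing the
# final tau_lo_40 back through the mutated in_out_ptr16, and saving the step
# value dm_18 (= %div_17, tmp30) to out_ptr0 for the follow-up kernels. A
# minimal PyTorch sketch of one such iteration follows; it is illustrative only,
# and the names (x, tau_lo, dm, f_lo) do not appear in the generated code.
def _bisection_step_sketch(x, tau_lo, dm, f_lo):
    """One step of the threshold bisection that the fused kernel unrolls.

    Assumed shapes: x is (..., V); tau_lo, dm, f_lo are (...,).
    """
    import torch  # local import keeps the sketch self-contained
    dm = dm / 2                                           # dm_k       -> %div_k
    tau_m = tau_lo + dm                                   # tau_m_k    -> %add_k
    p_m = torch.clamp(x - tau_m.unsqueeze(-1), min=0.0)   # p_m_k      -> %clamp_min_k
    f_m = p_m.sum(dim=-1) - 1.0                           # f_m_k      -> %sum_k - 1
    tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)  # tau_lo_k+1 -> %where_k
    return tau_lo, dm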
# kernel path: runs/run_shard_4/inductor_cache/yz/cyzkhbz7eylqmmsct2eisz274ptkf4xwckrryhdzr5c5xpld426a.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_86, p_m_40], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# p_m_40 => clamp_min_41
# sub_86 => sub_86
# tau_m_40 => add_40
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sub_86 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_40), kwargs = {})
# %clamp_min_41 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_86, 0), kwargs = {})
triton_poi_fused_add_clamp_div_sub_2 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp1 + tmp26
tmp28 = tmp0 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
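# Reader note (not Inductor output): this pointwise kernel reconstructs dm_41
# from the saved step value (dm_18, written by the fused kernel's out_ptr0) by
# halving it 23 more times in the 0.5-multiplication chain above, then emits
# p_m_40 = clamp(x - (tau_lo_40 + dm_41), 0) for every element. The row sum
# that turns p_m_40 into f_m_40 happens in the next kernel.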
# kernel path: runs/run_shard_4/inductor_cache/sr/csrhhpesnpea33gn4n5cx2now3swxe3jsyebpywtfcwh4ny67d7d.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sum_42, f_m_40, mul_41, tau_lo_41], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# f_lo => sub_4
# f_m_40 => sub_87
# max_1 => max_1
# mul_41 => mul_41
# sub_3 => sub_3
# sum_1 => sum_1
# sum_42 => sum_42
# tau_lo => sub_1
# tau_lo_41 => where_40
# tau_m_40 => add_40
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sum_42 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_41, [-1]), kwargs = {})
# %sub_87 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_42, 1), kwargs = {})
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_87, %sub_4), kwargs = {})
# %where_40 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_40, %add_40, %where_39), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x0), xmask)
tmp33 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp32 + tmp57
tmp59 = tl.where(tmp31, tmp58, tmp32)
tl.store(in_out_ptr0 + (x0), tmp59, xmask)
''', device_str='cuda')
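# Reader note (not Inductor output): this per-row kernel closes the iteration
# the previous kernel opened. It sums the stored p_m_40 values to get
# f_m_40 = sum_42 - 1, recomputes f_lo from the raw scores via the max/clamp/sum
# chain, and advances tau_lo in place (in_out_ptr0) to tau_m_40 = tau_lo_40 +
# dm_41 whenever f_m_40 * f_lo >= 0.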
# kernel path: runs/run_shard_4/inductor_cache/ri/cri5g5cwgob26dhdepr7at5q3uqt7g5zn5i44mttvzuefjefarlt.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_88, p_m_41], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# p_m_41 => clamp_min_42
# sub_88 => sub_88
# tau_m_41 => add_41
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sub_88 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_41), kwargs = {})
# %clamp_min_42 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_88, 0), kwargs = {})
triton_poi_fused_add_clamp_div_sub_4 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp1 + tmp27
tmp29 = tmp0 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
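# NOTE: a hedged reading of the kernel above: it evaluates one bisection
# midpoint of a sparsemax-style threshold search, p_m_41 = clamp(X -
# (tau_lo_41 + dm_42), 0), where the chain of `tmp * 0.5` statements is the
# unrolled halving that turns the stored step width into dm_42. A minimal
# eager-mode sketch of the same elementwise step (names are illustrative,
# not taken from the generated code):
def _sketch_bisection_midpoint(X, tau_lo, dm, halvings=24):
    # dm * 0.5**halvings mirrors the unrolled multiply chain in the kernel
    tau_m = tau_lo + dm * 0.5 ** halvings
    # clamp_min(0) corresponds to triton_helpers.maximum(..., 0.0)
    return (X - tau_m.unsqueeze(-1)).clamp_min(0.0)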
# kernel path: runs/run_shard_4/inductor_cache/6v/c6v66pcoz5wwqqj3qdoavhcalhw4ggic7f5bbgnf4wulrawivqmo.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sum_43, f_m_41, mul_42, tau_lo_42], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# f_lo => sub_4
# f_m_41 => sub_89
# max_1 => max_1
# mul_42 => mul_42
# sub_3 => sub_3
# sum_1 => sum_1
# sum_43 => sum_43
# tau_lo => sub_1
# tau_lo_42 => where_41
# tau_m_41 => add_41
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_42, [-1]), kwargs = {})
# %sub_89 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_43, 1), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_89, %sub_4), kwargs = {})
# %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_41, %add_41, %where_40), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + (x0), xmask)
tmp33 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
    tmp31 = tmp30 >= tmp18  # f_m_41 * f_lo >= 0: midpoint stays a valid lower bound
    tmp34 = 0.5
    # 24 halvings below reproduce dm_42 from the stored step width
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp57 * tmp34
tmp59 = tmp32 + tmp58
    tmp60 = tl.where(tmp31, tmp59, tmp32)  # tau_lo_42: accepted midpoint or previous bound
tl.store(in_out_ptr0 + (x0), tmp60, xmask)
''', device_str='cuda')
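# NOTE: hedged reading: the kernel above fuses one bisection acceptance step.
# It recomputes f_lo = sum(clamp(X - tau_lo_0, 0)) - 1 from scratch, sums the
# precomputed midpoint values to form f_m_41, and advances the lower bound
# only when f_m_41 * f_lo >= 0. A minimal eager-mode sketch of the update
# rule (illustrative names, not from the generated code):
def _sketch_bisection_update(X, tau_lo, tau_m, f_lo):
    # f at the midpoint: how far the clamped sum overshoots the simplex
    f_m = (X - tau_m.unsqueeze(-1)).clamp_min(0.0).sum(-1) - 1.0
    # keep tau_m only while it lies on the same side of the root as tau_lo
    return torch.where(f_m * f_lo >= 0, tau_m, tau_lo)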
# kernel path: runs/run_shard_4/inductor_cache/lg/clgmpssl52or2lky2wemwq23igzgtq7po6uel2vq4hq3f6g72lml.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_90], Original ATen: [aten.div, aten.add, aten.sub]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# sub_90 => sub_90
# tau_m_42 => add_42
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %sub_90 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_42), kwargs = {})
triton_poi_fused_add_div_sub_6 = async_compile.triton('triton_poi_fused_add_div_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = 0.5
    # 25 halvings below: tmp28 = tmp2 * 0.5**25, i.e. dm_43 in the graph above
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp1 + tmp28
tmp30 = tmp0 - tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
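# NOTE: hedged reading: same midpoint computation as
# triton_poi_fused_add_clamp_div_sub_4 above (one extra halving, dm_43),
# except the clamp is deferred: this kernel stores the raw difference
# X - tau_m_42 and the next kernel applies clamp_min(0) while summing.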
# kernel path: runs/run_shard_4/inductor_cache/jl/cjlkx2otyy2fduimnbn7ccxqmgq2wq4zlcyndzdk767y2f6xq7z2.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# f_lo => sub_4
# f_m_42 => sub_91
# max_1 => max_1
# mul_43 => mul_43
# p_m_42 => clamp_min_43
# sub_3 => sub_3
# sum_1 => sum_1
# sum_44 => sum_44
# tau_lo => sub_1
# tau_lo_43 => where_42
# tau_m_42 => add_42
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %clamp_min_43 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_90, 0), kwargs = {})
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_43, [-1]), kwargs = {})
# %sub_91 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_44, 1), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_91, %sub_4), kwargs = {})
# %where_42 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_42, %add_42, %where_41), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x0), xmask)
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
    tmp35 = tmp34 >= tmp1  # f_m_42 * f_lo >= 0 acceptance test
    tmp38 = 0.5
    # 25 halvings below: tmp63 = tmp37 * 0.5**25, i.e. dm_43
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp36 + tmp63
tmp65 = tl.where(tmp35, tmp64, tmp36)
tl.store(in_out_ptr0 + (x0), tmp65, xmask)
''', device_str='cuda')
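# NOTE: hedged reading: acceptance step analogous to
# triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5 above, but since
# the previous kernel stored the unclamped difference, clamp_min(0) is
# applied here while summing in_ptr0 to rebuild f_m_42.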
# kernel path: runs/run_shard_4/inductor_cache/45/c45ntljmhht4tqet2q6qjdfrins55gu6vaasohzaiqpfex6sk7ob.py
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_92], Original ATen: [aten.div, aten.add, aten.sub]
# Source node to ATen node mapping:
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# sub_92 => sub_92
# tau_m_43 => add_43
# Graph fragment:
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %sub_92 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_43), kwargs = {})
triton_poi_fused_add_div_sub_8 = async_compile.triton('triton_poi_fused_add_div_sub_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_sub_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = 0.5
    # 26 halvings below: tmp29 = tmp2 * 0.5**26, i.e. dm_44 in the graph above
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp28 * tmp3
tmp30 = tmp1 + tmp29
tmp31 = tmp0 - tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
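# NOTE: hedged reading: identical in structure to
# triton_poi_fused_add_div_sub_6 above, advanced by one more halving
# (dm_44) to form the next midpoint tau_m_43.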
# kernel path: runs/run_shard_4/inductor_cache/lz/clzcekwx3vzl5uustsl63asl5aemxd4bfzovv6jgg7f5yz6ljzjf.py
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, p_m_43, sum_45, f_m_43, mul_44, tau_lo_44, dm_45, tau_m_44, sub_94, p_m_44, sum_46, f_m_44, mul_45, tau_lo_45, dm_46, tau_m_45, sub_96, p_m_45, sum_47, f_m_45, mul_46, tau_lo_46, dm_47, tau_m_46, sub_98, p_m_46, sum_48, f_m_46, mul_47, tau_lo_47, dm_48, tau_m_47, sub_100, p_m_47, sum_49, f_m_47, mul_48, tau_lo_48, dm_49, tau_m_48, sub_102, p_m_48, sum_50, f_m_48, tau_lo_49, dm_50, tau_m_49, sub_104, p_m_49, sum_52], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
# Source node to ATen node mapping:
# clamp => clamp_min
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# dm_45 => div_44
# dm_46 => div_45
# dm_47 => div_46
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# f_lo => sub_4
# f_m_43 => sub_93
# f_m_44 => sub_95
# f_m_45 => sub_97
# f_m_46 => sub_99
# f_m_47 => sub_101
# f_m_48 => sub_103
# max_1 => max_1
# mul_44 => mul_44
# mul_45 => mul_45
# mul_46 => mul_46
# mul_47 => mul_47
# mul_48 => mul_48
# p_m_43 => clamp_min_44
# p_m_44 => clamp_min_45
# p_m_45 => clamp_min_46
# p_m_46 => clamp_min_47
# p_m_47 => clamp_min_48
# p_m_48 => clamp_min_49
# p_m_49 => clamp_min_50
# sub_100 => sub_100
# sub_102 => sub_102
# sub_104 => sub_104
# sub_3 => sub_3
# sub_94 => sub_94
# sub_96 => sub_96
# sub_98 => sub_98
# sum_1 => sum_1
# sum_45 => sum_45
# sum_46 => sum_46
# sum_47 => sum_47
# sum_48 => sum_48
# sum_49 => sum_49
# sum_50 => sum_50
# sum_52 => sum_52
# tau_lo => sub_1
# tau_lo_44 => where_43
# tau_lo_45 => where_44
# tau_lo_46 => where_45
# tau_lo_47 => where_46
# tau_lo_48 => where_47
# tau_lo_49 => where_48
# tau_m_43 => add_43
# tau_m_44 => add_44
# tau_m_45 => add_45
# tau_m_46 => add_46
# tau_m_47 => add_47
# tau_m_48 => add_48
# tau_m_49 => add_49
# Graph fragment:
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%arg0_1, -1, True), kwargs = {})
# %sub_1 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, 1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %sub_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min, [-1]), kwargs = {})
# %sub_4 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %clamp_min_44 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_92, 0), kwargs = {})
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_44, [-1]), kwargs = {})
# %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_45, 1), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_93, %sub_4), kwargs = {})
# %where_43 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_43, %add_43, %where_42), kwargs = {})
# %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {})
# %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_43, %div_44), kwargs = {})
# %sub_94 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_44), kwargs = {})
# %clamp_min_45 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_94, 0), kwargs = {})
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_45, [-1]), kwargs = {})
# %sub_95 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_46, 1), kwargs = {})
# %mul_45 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_95, %sub_4), kwargs = {})
# %where_44 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_44, %add_44, %where_43), kwargs = {})
# %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {})
# %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_44, %div_45), kwargs = {})
# %sub_96 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_45), kwargs = {})
# %clamp_min_46 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_96, 0), kwargs = {})
# %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_46, [-1]), kwargs = {})
# %sub_97 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_47, 1), kwargs = {})
# %mul_46 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_97, %sub_4), kwargs = {})
# %where_45 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_45, %add_45, %where_44), kwargs = {})
# %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {})
# %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_45, %div_46), kwargs = {})
# %sub_98 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_46), kwargs = {})
# %clamp_min_47 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_98, 0), kwargs = {})
# %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_47, [-1]), kwargs = {})
# %sub_99 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_48, 1), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_99, %sub_4), kwargs = {})
# %where_46 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_46, %add_46, %where_45), kwargs = {})
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {})
# %sub_100 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_47), kwargs = {})
# %clamp_min_48 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_100, 0), kwargs = {})
# %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_48, [-1]), kwargs = {})
# %sub_101 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_49, 1), kwargs = {})
# %mul_48 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_101, %sub_4), kwargs = {})
# %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {})
# %sub_102 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_48), kwargs = {})
# %clamp_min_49 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_102, 0), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_49, [-1]), kwargs = {})
# %sub_103 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_50, 1), kwargs = {})
# %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_104 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_104, 0), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%clamp_min_50, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + (x0), xmask)
tmp37 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
    tmp35 = tmp34 >= tmp1  # acceptance test for tau_m_43
    tmp38 = 0.5
    # 26 halvings to dm_44, then six unrolled halve-evaluate-select steps follow
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp63 * tmp38
tmp65 = tmp36 + tmp64
tmp66 = tl.where(tmp35, tmp65, tmp36)
tmp67 = tmp64 * tmp38
tmp68 = tmp66 + tmp67
tmp69 = tmp14 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = tmp15 - tmp68
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = tmp70 + tmp72
tmp74 = tmp17 - tmp68
tmp75 = triton_helpers.maximum(tmp74, tmp1)
tmp76 = tmp73 + tmp75
tmp77 = tmp19 - tmp68
tmp78 = triton_helpers.maximum(tmp77, tmp1)
tmp79 = tmp76 + tmp78
tmp80 = tmp79 - tmp12
tmp81 = tmp80 * tmp33
tmp82 = tmp81 >= tmp1
tmp83 = tl.where(tmp82, tmp68, tmp66)
tmp84 = tmp67 * tmp38
tmp85 = tmp83 + tmp84
tmp86 = tmp14 - tmp85
tmp87 = triton_helpers.maximum(tmp86, tmp1)
tmp88 = tmp15 - tmp85
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = tmp87 + tmp89
tmp91 = tmp17 - tmp85
tmp92 = triton_helpers.maximum(tmp91, tmp1)
tmp93 = tmp90 + tmp92
tmp94 = tmp19 - tmp85
tmp95 = triton_helpers.maximum(tmp94, tmp1)
tmp96 = tmp93 + tmp95
tmp97 = tmp96 - tmp12
tmp98 = tmp97 * tmp33
tmp99 = tmp98 >= tmp1
tmp100 = tl.where(tmp99, tmp85, tmp83)
tmp101 = tmp84 * tmp38
tmp102 = tmp100 + tmp101
tmp103 = tmp14 - tmp102
tmp104 = triton_helpers.maximum(tmp103, tmp1)
tmp105 = tmp15 - tmp102
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = tmp104 + tmp106
tmp108 = tmp17 - tmp102
tmp109 = triton_helpers.maximum(tmp108, tmp1)
tmp110 = tmp107 + tmp109
tmp111 = tmp19 - tmp102
tmp112 = triton_helpers.maximum(tmp111, tmp1)
tmp113 = tmp110 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp33
tmp116 = tmp115 >= tmp1
tmp117 = tl.where(tmp116, tmp102, tmp100)
tmp118 = tmp101 * tmp38
tmp119 = tmp117 + tmp118
tmp120 = tmp14 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp1)
tmp122 = tmp15 - tmp119
tmp123 = triton_helpers.maximum(tmp122, tmp1)
tmp124 = tmp121 + tmp123
tmp125 = tmp17 - tmp119
tmp126 = triton_helpers.maximum(tmp125, tmp1)
tmp127 = tmp124 + tmp126
tmp128 = tmp19 - tmp119
tmp129 = triton_helpers.maximum(tmp128, tmp1)
tmp130 = tmp127 + tmp129
tmp131 = tmp130 - tmp12
tmp132 = tmp131 * tmp33
tmp133 = tmp132 >= tmp1
tmp134 = tl.where(tmp133, tmp119, tmp117)
tmp135 = tmp118 * tmp38
tmp136 = tmp134 + tmp135
tmp137 = tmp14 - tmp136
tmp138 = triton_helpers.maximum(tmp137, tmp1)
tmp139 = tmp15 - tmp136
tmp140 = triton_helpers.maximum(tmp139, tmp1)
tmp141 = tmp138 + tmp140
tmp142 = tmp17 - tmp136
tmp143 = triton_helpers.maximum(tmp142, tmp1)
tmp144 = tmp141 + tmp143
tmp145 = tmp19 - tmp136
tmp146 = triton_helpers.maximum(tmp145, tmp1)
tmp147 = tmp144 + tmp146
tmp148 = tmp147 - tmp12
tmp149 = tmp148 * tmp33
tmp150 = tmp149 >= tmp1
tmp151 = tl.where(tmp150, tmp136, tmp134)
tmp152 = tmp135 * tmp38
tmp153 = tmp151 + tmp152
tmp154 = tmp14 - tmp153
tmp155 = triton_helpers.maximum(tmp154, tmp1)
tmp156 = tmp15 - tmp153
tmp157 = triton_helpers.maximum(tmp156, tmp1)
tmp158 = tmp155 + tmp157
tmp159 = tmp17 - tmp153
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = tmp158 + tmp160
tmp162 = tmp19 - tmp153
tmp163 = triton_helpers.maximum(tmp162, tmp1)
tmp164 = tmp161 + tmp163
tl.store(out_ptr4 + (x0), tmp101, xmask)
tl.store(in_out_ptr2 + (x0), tmp151, xmask)
tl.store(out_ptr7 + (x0), tmp164, xmask)
''', device_str='cuda')
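# NOTE: hedged reading: the kernel above keeps the tail of the bisection
# entirely in registers. After rebuilding f_lo and accepting or rejecting
# tau_m_43, it unrolls five further halve-evaluate-select steps
# (tau_m_44 .. tau_m_48, the where_43 .. where_48 selects), then evaluates
# the final midpoint tau_m_49 once more to produce sum_52, the normalizer
# consumed by the last kernel. Only the surviving step width, the lower
# bound, and that sum are written back to memory.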
# kernel path: runs/run_shard_4/inductor_cache/qe/cqe4wxdrqmlpdjpsore2xrtnu7imed3hrgbsaycsyw5yzkt5vtf2.py
# Topologically Sorted Source Nodes: [dm_48, dm_49, dm_50, tau_m_49, sub_104, p_m_49, p_m_50], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# p_m_49 => clamp_min_50
# p_m_50 => div_50
# sub_104 => sub_104
# tau_m_49 => add_49
# Graph fragment:
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_104 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_104, 0), kwargs = {})
# %div_50 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_min_50, %unsqueeze_50), kwargs = {})
triton_poi_fused_add_clamp_div_sub_10 = async_compile.triton('triton_poi_fused_add_clamp_div_sub_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_sub_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = 0.5
    # three final halvings give dm_50; tmp7 = tau_lo_49 + dm_50 is tau_m_49
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp1 + tmp6
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
    tmp12 = tmp10 / tmp11  # p_m_50: normalize clamp(X - tau_m_49, 0) by its sum
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
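# NOTE: hedged reading of the whole pipeline: the kernels above unroll a
# fixed 50-step bisection for the sparsemax projection, solving
# sum(clamp(X - tau, 0)) = 1 for tau and normalizing the result. A compact
# eager-mode reference of the same algorithm (a sketch under assumptions:
# the initial bracket width is taken as 1.0, and all names are
# illustrative, not part of the generated code):
def _sketch_sparsemax_bisect(X, n_iter=49):
    # lower bound: f(max(X) - 1) >= 0, since the max entry alone sums to 1
    tau_lo = X.max(dim=-1, keepdim=True).values - 1.0
    dm = 1.0  # assumed initial bracket width (tau_hi - tau_lo)
    f_lo = (X - tau_lo).clamp_min(0.0).sum(-1, keepdim=True) - 1.0
    for _ in range(n_iter):
        dm /= 2.0                       # dm_1, dm_2, ... in the graph
        tau_m = tau_lo + dm             # tau_m_k
        f_m = (X - tau_m).clamp_min(0.0).sum(-1, keepdim=True) - 1.0
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)  # tau_lo_{k+1}
    p = (X - (tau_lo + dm / 2.0)).clamp_min(0.0)  # final midpoint, p_m_49
    return p / p.sum(-1, keepdim=True)            # p_m_50 normalization
# e.g. _sketch_sparsemax_bisect(torch.randn(4, 4, 4, 4)) handles the same
# (4, 4, 4, 4) shape that call(args) below asserts for arg0_1.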
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf40 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, tau_m, sub_6, p_m, sum_2, f_m, sub_3, clamp, sum_1, f_lo, mul_1, tau_lo_1, dm_2, tau_m_1, sub_8, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_10, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_12, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_14, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_16, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_18, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_20, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_22, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_24, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_26, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_28, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_30, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_32, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_34, p_m_14, sum_16, f_m_14, mul_15, tau_lo_15, dm_16, tau_m_15, sub_36, p_m_15, sum_17, f_m_15, mul_16, tau_lo_16, dm_17, tau_m_16, sub_38, p_m_16, sum_18, f_m_16, mul_17, tau_lo_17], Original ATen: [aten.max, aten.sub, aten.div, aten.add, aten.clamp, aten.sum, aten.mul, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0.run(buf41, arg0_1, 64, grid=grid(64), stream=stream0)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf81 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_hi, tau_lo, dm, dm_1, sub_3, clamp, sum_1, f_lo, dm_2, dm_3, dm_4, dm_5, dm_6, dm_7, dm_8, dm_9, dm_10, dm_11, dm_12, dm_13, dm_14, dm_15, dm_16, dm_17, dm_18, tau_m_17, sub_40, p_m_17, sum_19, f_m_17, mul_18, tau_lo_18, dm_19, tau_m_18, sub_42, p_m_18, sum_20, f_m_18, mul_19, tau_lo_19, dm_20, tau_m_19, sub_44, p_m_19, sum_21, f_m_19, tau_lo_20, dm_21, tau_m_20, sub_46, p_m_20, sum_22, f_m_20, mul_21, tau_lo_21, dm_22, tau_m_21, sub_48, p_m_21, sum_23, f_m_21, mul_22, tau_lo_22, dm_23, tau_m_22, sub_50, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_52, p_m_23, sum_25, f_m_23, mul_24, tau_lo_24, dm_25, tau_m_24, sub_54, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_56, p_m_25, sum_27, f_m_25, mul_26, tau_lo_26, dm_27, tau_m_26, sub_58, p_m_26, sum_28, tau_lo_27, dm_28, tau_m_27, sub_60, p_m_27, sum_29, f_m_27, tau_lo_28, dm_29, tau_m_28, sub_62, p_m_28, sum_30, tau_lo_29, dm_30, tau_m_29, sub_64, p_m_29, sum_31, tau_lo_30, dm_31, tau_m_30, sub_66, p_m_30, sum_32, tau_lo_31, dm_32, tau_m_31, sub_68, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_70, p_m_32, sum_34, tau_lo_33, dm_34, tau_m_33, sub_72, p_m_33, sum_35, tau_lo_34, dm_35, tau_m_34, sub_74, p_m_34, sum_36, tau_lo_35, dm_36, tau_m_35, sub_76, p_m_35, sum_37, tau_lo_36, dm_37, tau_m_36, sub_78, p_m_36, sum_38, tau_lo_37, dm_38, tau_m_37, sub_80, p_m_37, sum_39, tau_lo_38, dm_39, tau_m_38, sub_82, p_m_38, sum_40, tau_lo_39, dm_40, tau_m_39, sub_84, p_m_39, sum_41, tau_lo_40], Original ATen: [aten.max, aten.sub, aten.div, aten.clamp, aten.sum, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1.run(buf82, arg0_1, buf41, buf42, 64, grid=grid(64), stream=stream0)
buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_86, p_m_40], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_2.run(arg0_1, buf82, buf42, buf83, 256, grid=grid(256), stream=stream0)
buf85 = buf82; del buf82 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sum_42, f_m_40, mul_41, tau_lo_41], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3.run(buf85, buf83, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf86 = buf83; del buf83 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sub_88, p_m_41], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_4.run(arg0_1, buf85, buf42, buf86, 256, grid=grid(256), stream=stream0)
buf88 = buf85; del buf85 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, tau_m_41, sum_43, f_m_41, mul_42, tau_lo_42], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5.run(buf88, buf86, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf89 = buf86; del buf86 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, sub_90], Original ATen: [aten.div, aten.add, aten.sub]
triton_poi_fused_add_div_sub_6.run(arg0_1, buf88, buf42, buf89, 256, grid=grid(256), stream=stream0)
buf91 = buf88; del buf88 # reuse
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, tau_m_42, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7.run(buf91, buf89, arg0_1, buf42, 64, grid=grid(64), stream=stream0)
buf92 = buf89; del buf89 # reuse
# Topologically Sorted Source Nodes: [dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, sub_92], Original ATen: [aten.div, aten.add, aten.sub]
triton_poi_fused_add_div_sub_8.run(arg0_1, buf91, buf42, buf92, 256, grid=grid(256), stream=stream0)
buf94 = buf91; del buf91 # reuse
buf100 = buf41; del buf41 # reuse
buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf103 # reuse
buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1, tau_lo, sub_3, clamp, sum_1, f_lo, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, dm_42, dm_43, dm_44, tau_m_43, p_m_43, sum_45, f_m_43, mul_44, tau_lo_44, dm_45, tau_m_44, sub_94, p_m_44, sum_46, f_m_44, mul_45, tau_lo_45, dm_46, tau_m_45, sub_96, p_m_45, sum_47, f_m_45, mul_46, tau_lo_46, dm_47, tau_m_46, sub_98, p_m_46, sum_48, f_m_46, mul_47, tau_lo_47, dm_48, tau_m_47, sub_100, p_m_47, sum_49, f_m_47, mul_48, tau_lo_48, dm_49, tau_m_48, sub_102, p_m_48, sum_50, f_m_48, tau_lo_49, dm_50, tau_m_49, sub_104, p_m_49, sum_52], Original ATen: [aten.max, aten.sub, aten.clamp, aten.sum, aten.div, aten.add, aten.mul, aten.where]
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9.run(buf94, buf104, buf92, arg0_1, buf42, buf100, buf105, 64, grid=grid(64), stream=stream0)
del buf42
del buf94
buf106 = buf92; del buf92 # reuse
# Topologically Sorted Source Nodes: [dm_48, dm_49, dm_50, tau_m_49, sub_104, p_m_49, p_m_50], Original ATen: [aten.div, aten.add, aten.sub, aten.clamp]
triton_poi_fused_add_clamp_div_sub_10.run(arg0_1, buf104, buf100, buf105, buf106, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf100
del buf104
del buf105
return (buf106, )
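# Buffer strategy: the (4, 4, 4, 1) buffers carry per-row tau_lo / dm
# scalars, the (4, 4, 4, 4) buffers carry elementwise p_m values and are
# reused in place between iterations; buf106 holds the normalized output.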
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
ensure_sum_one : bool
Whether to divide the result by its sum. If false, the result might
sum to a value close to, but not exactly, 1, which may cause downstream problems.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
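# Minimal usage sketch (values are illustrative, not from the source):
# >>> x = torch.tensor([[-1.0, 0.0, 0.5, 2.0]])
# >>> p = sparsemax_bisect(x)
# >>> p.sum(dim=-1)  # tensor([1.]); low-scoring entries get exact zeros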
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
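# Bisection rationale: f(tau) = sum(p(X - tau)) - 1 is non-increasing in
# tau with f(tau_lo) >= 0 >= f(tau_hi), so the loop above keeps the root
# bracketed and gains one bit of precision per halving of dm.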
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
n_iter = 50 if n_iter is None else n_iter  # SparsemaxBisect may pass None
return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None, None
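# On the support S = {Y > 0}, the sparsemax Jacobian-vector product
# reduces to dX = dY - mean(dY over S) and zero off the support, which is
# what the masked sum / q computation above implements.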
class SparsemaxBisect(nn.Module):
def __init__(self, dim=-1, n_iter=None):
"""sparsemax: normalizing sparse transform (a la softmax) via bisection
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
"""
super().__init__()
self.dim = dim
self.n_iter = n_iter
def forward(self, X):
return sparsemax_bisect(X, dim=self.dim, n_iter=self.n_iter)
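# Example (shapes are illustrative):
# >>> layer = SparsemaxBisect(dim=-1)
# >>> layer(torch.randn(2, 5)).sum(-1)  # each row sums to 1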
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
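# Kernel 0: for each row of 4 scores, take the max, bracket the threshold
# with tau_lo = max - 1 and tau_hi = max - 1/4, then run 17 fully unrolled
# bisection steps, halving dm and raising tau_lo whenever
# f(tau_m) * f(tau_lo) >= 0.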
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0(in_out_ptr8,
in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp9 = 0.25
tmp10 = tmp6 - tmp9
tmp11 = tmp10 - tmp8
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp8 + tmp13
tmp15 = tmp0 - tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp15, tmp16)
tmp18 = tmp1 - tmp14
tmp19 = triton_helpers.maximum(tmp18, tmp16)
tmp20 = tmp17 + tmp19
tmp21 = tmp3 - tmp14
tmp22 = triton_helpers.maximum(tmp21, tmp16)
tmp23 = tmp20 + tmp22
tmp24 = tmp5 - tmp14
tmp25 = triton_helpers.maximum(tmp24, tmp16)
tmp26 = tmp23 + tmp25
tmp27 = tmp26 - tmp7
tmp28 = tmp0 - tmp8
tmp29 = triton_helpers.maximum(tmp28, tmp16)
tmp30 = tmp1 - tmp8
tmp31 = triton_helpers.maximum(tmp30, tmp16)
tmp32 = tmp29 + tmp31
tmp33 = tmp3 - tmp8
tmp34 = triton_helpers.maximum(tmp33, tmp16)
tmp35 = tmp32 + tmp34
tmp36 = tmp5 - tmp8
tmp37 = triton_helpers.maximum(tmp36, tmp16)
tmp38 = tmp35 + tmp37
tmp39 = tmp38 - tmp7
tmp40 = tmp27 * tmp39
tmp41 = tmp40 >= tmp16
tmp42 = tl.where(tmp41, tmp14, tmp8)
tmp43 = tmp13 * tmp12
tmp44 = tmp42 + tmp43
tmp45 = tmp0 - tmp44
tmp46 = triton_helpers.maximum(tmp45, tmp16)
tmp47 = tmp1 - tmp44
tmp48 = triton_helpers.maximum(tmp47, tmp16)
tmp49 = tmp46 + tmp48
tmp50 = tmp3 - tmp44
tmp51 = triton_helpers.maximum(tmp50, tmp16)
tmp52 = tmp49 + tmp51
tmp53 = tmp5 - tmp44
tmp54 = triton_helpers.maximum(tmp53, tmp16)
tmp55 = tmp52 + tmp54
tmp56 = tmp55 - tmp7
tmp57 = tmp56 * tmp39
tmp58 = tmp57 >= tmp16
tmp59 = tl.where(tmp58, tmp44, tmp42)
tmp60 = tmp43 * tmp12
tmp61 = tmp59 + tmp60
tmp62 = tmp0 - tmp61
tmp63 = triton_helpers.maximum(tmp62, tmp16)
tmp64 = tmp1 - tmp61
tmp65 = triton_helpers.maximum(tmp64, tmp16)
tmp66 = tmp63 + tmp65
tmp67 = tmp3 - tmp61
tmp68 = triton_helpers.maximum(tmp67, tmp16)
tmp69 = tmp66 + tmp68
tmp70 = tmp5 - tmp61
tmp71 = triton_helpers.maximum(tmp70, tmp16)
tmp72 = tmp69 + tmp71
tmp73 = tmp72 - tmp7
tmp74 = tmp73 * tmp39
tmp75 = tmp74 >= tmp16
tmp76 = tl.where(tmp75, tmp61, tmp59)
tmp77 = tmp60 * tmp12
tmp78 = tmp76 + tmp77
tmp79 = tmp0 - tmp78
tmp80 = triton_helpers.maximum(tmp79, tmp16)
tmp81 = tmp1 - tmp78
tmp82 = triton_helpers.maximum(tmp81, tmp16)
tmp83 = tmp80 + tmp82
tmp84 = tmp3 - tmp78
tmp85 = triton_helpers.maximum(tmp84, tmp16)
tmp86 = tmp83 + tmp85
tmp87 = tmp5 - tmp78
tmp88 = triton_helpers.maximum(tmp87, tmp16)
tmp89 = tmp86 + tmp88
tmp90 = tmp89 - tmp7
tmp91 = tmp90 * tmp39
tmp92 = tmp91 >= tmp16
tmp93 = tl.where(tmp92, tmp78, tmp76)
tmp94 = tmp77 * tmp12
tmp95 = tmp93 + tmp94
tmp96 = tmp0 - tmp95
tmp97 = triton_helpers.maximum(tmp96, tmp16)
tmp98 = tmp1 - tmp95
tmp99 = triton_helpers.maximum(tmp98, tmp16)
tmp100 = tmp97 + tmp99
tmp101 = tmp3 - tmp95
tmp102 = triton_helpers.maximum(tmp101, tmp16)
tmp103 = tmp100 + tmp102
tmp104 = tmp5 - tmp95
tmp105 = triton_helpers.maximum(tmp104, tmp16)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 - tmp7
tmp108 = tmp107 * tmp39
tmp109 = tmp108 >= tmp16
tmp110 = tl.where(tmp109, tmp95, tmp93)
tmp111 = tmp94 * tmp12
tmp112 = tmp110 + tmp111
tmp113 = tmp0 - tmp112
tmp114 = triton_helpers.maximum(tmp113, tmp16)
tmp115 = tmp1 - tmp112
tmp116 = triton_helpers.maximum(tmp115, tmp16)
tmp117 = tmp114 + tmp116
tmp118 = tmp3 - tmp112
tmp119 = triton_helpers.maximum(tmp118, tmp16)
tmp120 = tmp117 + tmp119
tmp121 = tmp5 - tmp112
tmp122 = triton_helpers.maximum(tmp121, tmp16)
tmp123 = tmp120 + tmp122
tmp124 = tmp123 - tmp7
tmp125 = tmp124 * tmp39
tmp126 = tmp125 >= tmp16
tmp127 = tl.where(tmp126, tmp112, tmp110)
tmp128 = tmp111 * tmp12
tmp129 = tmp127 + tmp128
tmp130 = tmp0 - tmp129
tmp131 = triton_helpers.maximum(tmp130, tmp16)
tmp132 = tmp1 - tmp129
tmp133 = triton_helpers.maximum(tmp132, tmp16)
tmp134 = tmp131 + tmp133
tmp135 = tmp3 - tmp129
tmp136 = triton_helpers.maximum(tmp135, tmp16)
tmp137 = tmp134 + tmp136
tmp138 = tmp5 - tmp129
tmp139 = triton_helpers.maximum(tmp138, tmp16)
tmp140 = tmp137 + tmp139
tmp141 = tmp140 - tmp7
tmp142 = tmp141 * tmp39
tmp143 = tmp142 >= tmp16
tmp144 = tl.where(tmp143, tmp129, tmp127)
tmp145 = tmp128 * tmp12
tmp146 = tmp144 + tmp145
tmp147 = tmp0 - tmp146
tmp148 = triton_helpers.maximum(tmp147, tmp16)
tmp149 = tmp1 - tmp146
tmp150 = triton_helpers.maximum(tmp149, tmp16)
tmp151 = tmp148 + tmp150
tmp152 = tmp3 - tmp146
tmp153 = triton_helpers.maximum(tmp152, tmp16)
tmp154 = tmp151 + tmp153
tmp155 = tmp5 - tmp146
tmp156 = triton_helpers.maximum(tmp155, tmp16)
tmp157 = tmp154 + tmp156
tmp158 = tmp157 - tmp7
tmp159 = tmp158 * tmp39
tmp160 = tmp159 >= tmp16
tmp161 = tl.where(tmp160, tmp146, tmp144)
tmp162 = tmp145 * tmp12
tmp163 = tmp161 + tmp162
tmp164 = tmp0 - tmp163
tmp165 = triton_helpers.maximum(tmp164, tmp16)
tmp166 = tmp1 - tmp163
tmp167 = triton_helpers.maximum(tmp166, tmp16)
tmp168 = tmp165 + tmp167
tmp169 = tmp3 - tmp163
tmp170 = triton_helpers.maximum(tmp169, tmp16)
tmp171 = tmp168 + tmp170
tmp172 = tmp5 - tmp163
tmp173 = triton_helpers.maximum(tmp172, tmp16)
tmp174 = tmp171 + tmp173
tmp175 = tmp174 - tmp7
tmp176 = tmp175 * tmp39
tmp177 = tmp176 >= tmp16
tmp178 = tl.where(tmp177, tmp163, tmp161)
tmp179 = tmp162 * tmp12
tmp180 = tmp178 + tmp179
tmp181 = tmp0 - tmp180
tmp182 = triton_helpers.maximum(tmp181, tmp16)
tmp183 = tmp1 - tmp180
tmp184 = triton_helpers.maximum(tmp183, tmp16)
tmp185 = tmp182 + tmp184
tmp186 = tmp3 - tmp180
tmp187 = triton_helpers.maximum(tmp186, tmp16)
tmp188 = tmp185 + tmp187
tmp189 = tmp5 - tmp180
tmp190 = triton_helpers.maximum(tmp189, tmp16)
tmp191 = tmp188 + tmp190
tmp192 = tmp191 - tmp7
tmp193 = tmp192 * tmp39
tmp194 = tmp193 >= tmp16
tmp195 = tl.where(tmp194, tmp180, tmp178)
tmp196 = tmp179 * tmp12
tmp197 = tmp195 + tmp196
tmp198 = tmp0 - tmp197
tmp199 = triton_helpers.maximum(tmp198, tmp16)
tmp200 = tmp1 - tmp197
tmp201 = triton_helpers.maximum(tmp200, tmp16)
tmp202 = tmp199 + tmp201
tmp203 = tmp3 - tmp197
tmp204 = triton_helpers.maximum(tmp203, tmp16)
tmp205 = tmp202 + tmp204
tmp206 = tmp5 - tmp197
tmp207 = triton_helpers.maximum(tmp206, tmp16)
tmp208 = tmp205 + tmp207
tmp209 = tmp208 - tmp7
tmp210 = tmp209 * tmp39
tmp211 = tmp210 >= tmp16
tmp212 = tl.where(tmp211, tmp197, tmp195)
tmp213 = tmp196 * tmp12
tmp214 = tmp212 + tmp213
tmp215 = tmp0 - tmp214
tmp216 = triton_helpers.maximum(tmp215, tmp16)
tmp217 = tmp1 - tmp214
tmp218 = triton_helpers.maximum(tmp217, tmp16)
tmp219 = tmp216 + tmp218
tmp220 = tmp3 - tmp214
tmp221 = triton_helpers.maximum(tmp220, tmp16)
tmp222 = tmp219 + tmp221
tmp223 = tmp5 - tmp214
tmp224 = triton_helpers.maximum(tmp223, tmp16)
tmp225 = tmp222 + tmp224
tmp226 = tmp225 - tmp7
tmp227 = tmp226 * tmp39
tmp228 = tmp227 >= tmp16
tmp229 = tl.where(tmp228, tmp214, tmp212)
tmp230 = tmp213 * tmp12
tmp231 = tmp229 + tmp230
tmp232 = tmp0 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp16)
tmp234 = tmp1 - tmp231
tmp235 = triton_helpers.maximum(tmp234, tmp16)
tmp236 = tmp233 + tmp235
tmp237 = tmp3 - tmp231
tmp238 = triton_helpers.maximum(tmp237, tmp16)
tmp239 = tmp236 + tmp238
tmp240 = tmp5 - tmp231
tmp241 = triton_helpers.maximum(tmp240, tmp16)
tmp242 = tmp239 + tmp241
tmp243 = tmp242 - tmp7
tmp244 = tmp243 * tmp39
tmp245 = tmp244 >= tmp16
tmp246 = tl.where(tmp245, tmp231, tmp229)
tmp247 = tmp230 * tmp12
tmp248 = tmp246 + tmp247
tmp249 = tmp0 - tmp248
tmp250 = triton_helpers.maximum(tmp249, tmp16)
tmp251 = tmp1 - tmp248
tmp252 = triton_helpers.maximum(tmp251, tmp16)
tmp253 = tmp250 + tmp252
tmp254 = tmp3 - tmp248
tmp255 = triton_helpers.maximum(tmp254, tmp16)
tmp256 = tmp253 + tmp255
tmp257 = tmp5 - tmp248
tmp258 = triton_helpers.maximum(tmp257, tmp16)
tmp259 = tmp256 + tmp258
tmp260 = tmp259 - tmp7
tmp261 = tmp260 * tmp39
tmp262 = tmp261 >= tmp16
tmp263 = tl.where(tmp262, tmp248, tmp246)
tmp264 = tmp247 * tmp12
tmp265 = tmp263 + tmp264
tmp266 = tmp0 - tmp265
tmp267 = triton_helpers.maximum(tmp266, tmp16)
tmp268 = tmp1 - tmp265
tmp269 = triton_helpers.maximum(tmp268, tmp16)
tmp270 = tmp267 + tmp269
tmp271 = tmp3 - tmp265
tmp272 = triton_helpers.maximum(tmp271, tmp16)
tmp273 = tmp270 + tmp272
tmp274 = tmp5 - tmp265
tmp275 = triton_helpers.maximum(tmp274, tmp16)
tmp276 = tmp273 + tmp275
tmp277 = tmp276 - tmp7
tmp278 = tmp277 * tmp39
tmp279 = tmp278 >= tmp16
tmp280 = tl.where(tmp279, tmp265, tmp263)
tmp281 = tmp264 * tmp12
tmp282 = tmp280 + tmp281
tmp283 = tmp0 - tmp282
tmp284 = triton_helpers.maximum(tmp283, tmp16)
tmp285 = tmp1 - tmp282
tmp286 = triton_helpers.maximum(tmp285, tmp16)
tmp287 = tmp284 + tmp286
tmp288 = tmp3 - tmp282
tmp289 = triton_helpers.maximum(tmp288, tmp16)
tmp290 = tmp287 + tmp289
tmp291 = tmp5 - tmp282
tmp292 = triton_helpers.maximum(tmp291, tmp16)
tmp293 = tmp290 + tmp292
tmp294 = tmp293 - tmp7
tmp295 = tmp294 * tmp39
tmp296 = tmp295 >= tmp16
tmp297 = tl.where(tmp296, tmp282, tmp280)
tmp298 = tmp281 * tmp12
tmp299 = tmp297 + tmp298
tmp300 = tmp0 - tmp299
tmp301 = triton_helpers.maximum(tmp300, tmp16)
tmp302 = tmp1 - tmp299
tmp303 = triton_helpers.maximum(tmp302, tmp16)
tmp304 = tmp301 + tmp303
tmp305 = tmp3 - tmp299
tmp306 = triton_helpers.maximum(tmp305, tmp16)
tmp307 = tmp304 + tmp306
tmp308 = tmp5 - tmp299
tmp309 = triton_helpers.maximum(tmp308, tmp16)
tmp310 = tmp307 + tmp309
tmp311 = tmp310 - tmp7
tmp312 = tmp311 * tmp39
tmp313 = tmp312 >= tmp16
tmp314 = tl.where(tmp313, tmp299, tmp297)
tl.store(in_out_ptr8 + x0, tmp314, xmask)
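# Kernel 1: continues from kernel 0's tau_lo (in_ptr1) for 23 more
# unrolled bisection steps and also stores the shrunken interval width dm
# (out_ptr0) so the remaining kernels can keep halving it.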
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1(in_out_ptr16,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr1 + x0, xmask)
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 0.25
tmp8 = tmp6 - tmp7
tmp9 = 1.0
tmp10 = tmp6 - tmp9
tmp11 = tmp8 - tmp10
tmp12 = 0.5
tmp13 = tmp11 * tmp12
tmp14 = tmp13 * tmp12
tmp15 = tmp14 * tmp12
tmp16 = tmp15 * tmp12
tmp17 = tmp16 * tmp12
tmp18 = tmp17 * tmp12
tmp19 = tmp18 * tmp12
tmp20 = tmp19 * tmp12
tmp21 = tmp20 * tmp12
tmp22 = tmp21 * tmp12
tmp23 = tmp22 * tmp12
tmp24 = tmp23 * tmp12
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp12
tmp27 = tmp26 * tmp12
tmp28 = tmp27 * tmp12
tmp29 = tmp28 * tmp12
tmp30 = tmp29 * tmp12
tmp32 = tmp31 + tmp30
tmp33 = tmp0 - tmp32
tmp34 = 0.0
tmp35 = triton_helpers.maximum(tmp33, tmp34)
tmp36 = tmp1 - tmp32
tmp37 = triton_helpers.maximum(tmp36, tmp34)
tmp38 = tmp35 + tmp37
tmp39 = tmp3 - tmp32
tmp40 = triton_helpers.maximum(tmp39, tmp34)
tmp41 = tmp38 + tmp40
tmp42 = tmp5 - tmp32
tmp43 = triton_helpers.maximum(tmp42, tmp34)
tmp44 = tmp41 + tmp43
tmp45 = tmp44 - tmp9
tmp46 = tmp0 - tmp10
tmp47 = triton_helpers.maximum(tmp46, tmp34)
tmp48 = tmp1 - tmp10
tmp49 = triton_helpers.maximum(tmp48, tmp34)
tmp50 = tmp47 + tmp49
tmp51 = tmp3 - tmp10
tmp52 = triton_helpers.maximum(tmp51, tmp34)
tmp53 = tmp50 + tmp52
tmp54 = tmp5 - tmp10
tmp55 = triton_helpers.maximum(tmp54, tmp34)
tmp56 = tmp53 + tmp55
tmp57 = tmp56 - tmp9
tmp58 = tmp45 * tmp57
tmp59 = tmp58 >= tmp34
tmp60 = tl.where(tmp59, tmp32, tmp31)
tmp61 = tmp30 * tmp12
tmp62 = tmp60 + tmp61
tmp63 = tmp0 - tmp62
tmp64 = triton_helpers.maximum(tmp63, tmp34)
tmp65 = tmp1 - tmp62
tmp66 = triton_helpers.maximum(tmp65, tmp34)
tmp67 = tmp64 + tmp66
tmp68 = tmp3 - tmp62
tmp69 = triton_helpers.maximum(tmp68, tmp34)
tmp70 = tmp67 + tmp69
tmp71 = tmp5 - tmp62
tmp72 = triton_helpers.maximum(tmp71, tmp34)
tmp73 = tmp70 + tmp72
tmp74 = tmp73 - tmp9
tmp75 = tmp74 * tmp57
tmp76 = tmp75 >= tmp34
tmp77 = tl.where(tmp76, tmp62, tmp60)
tmp78 = tmp61 * tmp12
tmp79 = tmp77 + tmp78
tmp80 = tmp0 - tmp79
tmp81 = triton_helpers.maximum(tmp80, tmp34)
tmp82 = tmp1 - tmp79
tmp83 = triton_helpers.maximum(tmp82, tmp34)
tmp84 = tmp81 + tmp83
tmp85 = tmp3 - tmp79
tmp86 = triton_helpers.maximum(tmp85, tmp34)
tmp87 = tmp84 + tmp86
tmp88 = tmp5 - tmp79
tmp89 = triton_helpers.maximum(tmp88, tmp34)
tmp90 = tmp87 + tmp89
tmp91 = tmp90 - tmp9
tmp92 = tmp91 * tmp57
tmp93 = tmp92 >= tmp34
tmp94 = tl.where(tmp93, tmp79, tmp77)
tmp95 = tmp78 * tmp12
tmp96 = tmp94 + tmp95
tmp97 = tmp0 - tmp96
tmp98 = triton_helpers.maximum(tmp97, tmp34)
tmp99 = tmp1 - tmp96
tmp100 = triton_helpers.maximum(tmp99, tmp34)
tmp101 = tmp98 + tmp100
tmp102 = tmp3 - tmp96
tmp103 = triton_helpers.maximum(tmp102, tmp34)
tmp104 = tmp101 + tmp103
tmp105 = tmp5 - tmp96
tmp106 = triton_helpers.maximum(tmp105, tmp34)
tmp107 = tmp104 + tmp106
tmp108 = tmp107 - tmp9
tmp109 = tmp108 * tmp57
tmp110 = tmp109 >= tmp34
tmp111 = tl.where(tmp110, tmp96, tmp94)
tmp112 = tmp95 * tmp12
tmp113 = tmp111 + tmp112
tmp114 = tmp0 - tmp113
tmp115 = triton_helpers.maximum(tmp114, tmp34)
tmp116 = tmp1 - tmp113
tmp117 = triton_helpers.maximum(tmp116, tmp34)
tmp118 = tmp115 + tmp117
tmp119 = tmp3 - tmp113
tmp120 = triton_helpers.maximum(tmp119, tmp34)
tmp121 = tmp118 + tmp120
tmp122 = tmp5 - tmp113
tmp123 = triton_helpers.maximum(tmp122, tmp34)
tmp124 = tmp121 + tmp123
tmp125 = tmp124 - tmp9
tmp126 = tmp125 * tmp57
tmp127 = tmp126 >= tmp34
tmp128 = tl.where(tmp127, tmp113, tmp111)
tmp129 = tmp112 * tmp12
tmp130 = tmp128 + tmp129
tmp131 = tmp0 - tmp130
tmp132 = triton_helpers.maximum(tmp131, tmp34)
tmp133 = tmp1 - tmp130
tmp134 = triton_helpers.maximum(tmp133, tmp34)
tmp135 = tmp132 + tmp134
tmp136 = tmp3 - tmp130
tmp137 = triton_helpers.maximum(tmp136, tmp34)
tmp138 = tmp135 + tmp137
tmp139 = tmp5 - tmp130
tmp140 = triton_helpers.maximum(tmp139, tmp34)
tmp141 = tmp138 + tmp140
tmp142 = tmp141 - tmp9
tmp143 = tmp142 * tmp57
tmp144 = tmp143 >= tmp34
tmp145 = tl.where(tmp144, tmp130, tmp128)
tmp146 = tmp129 * tmp12
tmp147 = tmp145 + tmp146
tmp148 = tmp0 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp34)
tmp150 = tmp1 - tmp147
tmp151 = triton_helpers.maximum(tmp150, tmp34)
tmp152 = tmp149 + tmp151
tmp153 = tmp3 - tmp147
tmp154 = triton_helpers.maximum(tmp153, tmp34)
tmp155 = tmp152 + tmp154
tmp156 = tmp5 - tmp147
tmp157 = triton_helpers.maximum(tmp156, tmp34)
tmp158 = tmp155 + tmp157
tmp159 = tmp158 - tmp9
tmp160 = tmp159 * tmp57
tmp161 = tmp160 >= tmp34
tmp162 = tl.where(tmp161, tmp147, tmp145)
tmp163 = tmp146 * tmp12
tmp164 = tmp162 + tmp163
tmp165 = tmp0 - tmp164
tmp166 = triton_helpers.maximum(tmp165, tmp34)
tmp167 = tmp1 - tmp164
tmp168 = triton_helpers.maximum(tmp167, tmp34)
tmp169 = tmp166 + tmp168
tmp170 = tmp3 - tmp164
tmp171 = triton_helpers.maximum(tmp170, tmp34)
tmp172 = tmp169 + tmp171
tmp173 = tmp5 - tmp164
tmp174 = triton_helpers.maximum(tmp173, tmp34)
tmp175 = tmp172 + tmp174
tmp176 = tmp175 - tmp9
tmp177 = tmp176 * tmp57
tmp178 = tmp177 >= tmp34
tmp179 = tl.where(tmp178, tmp164, tmp162)
tmp180 = tmp163 * tmp12
tmp181 = tmp179 + tmp180
tmp182 = tmp0 - tmp181
tmp183 = triton_helpers.maximum(tmp182, tmp34)
tmp184 = tmp1 - tmp181
tmp185 = triton_helpers.maximum(tmp184, tmp34)
tmp186 = tmp183 + tmp185
tmp187 = tmp3 - tmp181
tmp188 = triton_helpers.maximum(tmp187, tmp34)
tmp189 = tmp186 + tmp188
tmp190 = tmp5 - tmp181
tmp191 = triton_helpers.maximum(tmp190, tmp34)
tmp192 = tmp189 + tmp191
tmp193 = tmp192 - tmp9
tmp194 = tmp193 * tmp57
tmp195 = tmp194 >= tmp34
tmp196 = tl.where(tmp195, tmp181, tmp179)
tmp197 = tmp180 * tmp12
tmp198 = tmp196 + tmp197
tmp199 = tmp0 - tmp198
tmp200 = triton_helpers.maximum(tmp199, tmp34)
tmp201 = tmp1 - tmp198
tmp202 = triton_helpers.maximum(tmp201, tmp34)
tmp203 = tmp200 + tmp202
tmp204 = tmp3 - tmp198
tmp205 = triton_helpers.maximum(tmp204, tmp34)
tmp206 = tmp203 + tmp205
tmp207 = tmp5 - tmp198
tmp208 = triton_helpers.maximum(tmp207, tmp34)
tmp209 = tmp206 + tmp208
tmp210 = tmp209 - tmp9
tmp211 = tmp210 * tmp57
tmp212 = tmp211 >= tmp34
tmp213 = tl.where(tmp212, tmp198, tmp196)
tmp214 = tmp197 * tmp12
tmp215 = tmp213 + tmp214
tmp216 = tmp0 - tmp215
tmp217 = triton_helpers.maximum(tmp216, tmp34)
tmp218 = tmp1 - tmp215
tmp219 = triton_helpers.maximum(tmp218, tmp34)
tmp220 = tmp217 + tmp219
tmp221 = tmp3 - tmp215
tmp222 = triton_helpers.maximum(tmp221, tmp34)
tmp223 = tmp220 + tmp222
tmp224 = tmp5 - tmp215
tmp225 = triton_helpers.maximum(tmp224, tmp34)
tmp226 = tmp223 + tmp225
tmp227 = tmp226 - tmp9
tmp228 = tmp227 * tmp57
tmp229 = tmp228 >= tmp34
tmp230 = tl.where(tmp229, tmp215, tmp213)
tmp231 = tmp214 * tmp12
tmp232 = tmp230 + tmp231
tmp233 = tmp0 - tmp232
tmp234 = triton_helpers.maximum(tmp233, tmp34)
tmp235 = tmp1 - tmp232
tmp236 = triton_helpers.maximum(tmp235, tmp34)
tmp237 = tmp234 + tmp236
tmp238 = tmp3 - tmp232
tmp239 = triton_helpers.maximum(tmp238, tmp34)
tmp240 = tmp237 + tmp239
tmp241 = tmp5 - tmp232
tmp242 = triton_helpers.maximum(tmp241, tmp34)
tmp243 = tmp240 + tmp242
tmp244 = tmp243 - tmp9
tmp245 = tmp244 * tmp57
tmp246 = tmp245 >= tmp34
tmp247 = tl.where(tmp246, tmp232, tmp230)
tmp248 = tmp231 * tmp12
tmp249 = tmp247 + tmp248
tmp250 = tmp0 - tmp249
tmp251 = triton_helpers.maximum(tmp250, tmp34)
tmp252 = tmp1 - tmp249
tmp253 = triton_helpers.maximum(tmp252, tmp34)
tmp254 = tmp251 + tmp253
tmp255 = tmp3 - tmp249
tmp256 = triton_helpers.maximum(tmp255, tmp34)
tmp257 = tmp254 + tmp256
tmp258 = tmp5 - tmp249
tmp259 = triton_helpers.maximum(tmp258, tmp34)
tmp260 = tmp257 + tmp259
tmp261 = tmp260 - tmp9
tmp262 = tmp261 * tmp57
tmp263 = tmp262 >= tmp34
tmp264 = tl.where(tmp263, tmp249, tmp247)
tmp265 = tmp248 * tmp12
tmp266 = tmp264 + tmp265
tmp267 = tmp0 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp34)
tmp269 = tmp1 - tmp266
tmp270 = triton_helpers.maximum(tmp269, tmp34)
tmp271 = tmp268 + tmp270
tmp272 = tmp3 - tmp266
tmp273 = triton_helpers.maximum(tmp272, tmp34)
tmp274 = tmp271 + tmp273
tmp275 = tmp5 - tmp266
tmp276 = triton_helpers.maximum(tmp275, tmp34)
tmp277 = tmp274 + tmp276
tmp278 = tmp277 - tmp9
tmp279 = tmp278 * tmp57
tmp280 = tmp279 >= tmp34
tmp281 = tl.where(tmp280, tmp266, tmp264)
tmp282 = tmp265 * tmp12
tmp283 = tmp281 + tmp282
tmp284 = tmp0 - tmp283
tmp285 = triton_helpers.maximum(tmp284, tmp34)
tmp286 = tmp1 - tmp283
tmp287 = triton_helpers.maximum(tmp286, tmp34)
tmp288 = tmp285 + tmp287
tmp289 = tmp3 - tmp283
tmp290 = triton_helpers.maximum(tmp289, tmp34)
tmp291 = tmp288 + tmp290
tmp292 = tmp5 - tmp283
tmp293 = triton_helpers.maximum(tmp292, tmp34)
tmp294 = tmp291 + tmp293
tmp295 = tmp294 - tmp9
tmp296 = tmp295 * tmp57
tmp297 = tmp296 >= tmp34
tmp298 = tl.where(tmp297, tmp283, tmp281)
tmp299 = tmp282 * tmp12
tmp300 = tmp298 + tmp299
tmp301 = tmp0 - tmp300
tmp302 = triton_helpers.maximum(tmp301, tmp34)
tmp303 = tmp1 - tmp300
tmp304 = triton_helpers.maximum(tmp303, tmp34)
tmp305 = tmp302 + tmp304
tmp306 = tmp3 - tmp300
tmp307 = triton_helpers.maximum(tmp306, tmp34)
tmp308 = tmp305 + tmp307
tmp309 = tmp5 - tmp300
tmp310 = triton_helpers.maximum(tmp309, tmp34)
tmp311 = tmp308 + tmp310
tmp312 = tmp311 - tmp9
tmp313 = tmp312 * tmp57
tmp314 = tmp313 >= tmp34
tmp315 = tl.where(tmp314, tmp300, tmp298)
tmp316 = tmp299 * tmp12
tmp317 = tmp315 + tmp316
tmp318 = tmp0 - tmp317
tmp319 = triton_helpers.maximum(tmp318, tmp34)
tmp320 = tmp1 - tmp317
tmp321 = triton_helpers.maximum(tmp320, tmp34)
tmp322 = tmp319 + tmp321
tmp323 = tmp3 - tmp317
tmp324 = triton_helpers.maximum(tmp323, tmp34)
tmp325 = tmp322 + tmp324
tmp326 = tmp5 - tmp317
tmp327 = triton_helpers.maximum(tmp326, tmp34)
tmp328 = tmp325 + tmp327
tmp329 = tmp328 - tmp9
tmp330 = tmp329 * tmp57
tmp331 = tmp330 >= tmp34
tmp332 = tl.where(tmp331, tmp317, tmp315)
tmp333 = tmp316 * tmp12
tmp334 = tmp332 + tmp333
tmp335 = tmp0 - tmp334
tmp336 = triton_helpers.maximum(tmp335, tmp34)
tmp337 = tmp1 - tmp334
tmp338 = triton_helpers.maximum(tmp337, tmp34)
tmp339 = tmp336 + tmp338
tmp340 = tmp3 - tmp334
tmp341 = triton_helpers.maximum(tmp340, tmp34)
tmp342 = tmp339 + tmp341
tmp343 = tmp5 - tmp334
tmp344 = triton_helpers.maximum(tmp343, tmp34)
tmp345 = tmp342 + tmp344
tmp346 = tmp345 - tmp9
tmp347 = tmp346 * tmp57
tmp348 = tmp347 >= tmp34
tmp349 = tl.where(tmp348, tmp334, tmp332)
tmp350 = tmp333 * tmp12
tmp351 = tmp349 + tmp350
tmp352 = tmp0 - tmp351
tmp353 = triton_helpers.maximum(tmp352, tmp34)
tmp354 = tmp1 - tmp351
tmp355 = triton_helpers.maximum(tmp354, tmp34)
tmp356 = tmp353 + tmp355
tmp357 = tmp3 - tmp351
tmp358 = triton_helpers.maximum(tmp357, tmp34)
tmp359 = tmp356 + tmp358
tmp360 = tmp5 - tmp351
tmp361 = triton_helpers.maximum(tmp360, tmp34)
tmp362 = tmp359 + tmp361
tmp363 = tmp362 - tmp9
tmp364 = tmp363 * tmp57
tmp365 = tmp364 >= tmp34
tmp366 = tl.where(tmp365, tmp351, tmp349)
tmp367 = tmp350 * tmp12
tmp368 = tmp366 + tmp367
tmp369 = tmp0 - tmp368
tmp370 = triton_helpers.maximum(tmp369, tmp34)
tmp371 = tmp1 - tmp368
tmp372 = triton_helpers.maximum(tmp371, tmp34)
tmp373 = tmp370 + tmp372
tmp374 = tmp3 - tmp368
tmp375 = triton_helpers.maximum(tmp374, tmp34)
tmp376 = tmp373 + tmp375
tmp377 = tmp5 - tmp368
tmp378 = triton_helpers.maximum(tmp377, tmp34)
tmp379 = tmp376 + tmp378
tmp380 = tmp379 - tmp9
tmp381 = tmp380 * tmp57
tmp382 = tmp381 >= tmp34
tmp383 = tl.where(tmp382, tmp368, tmp366)
tmp384 = tmp367 * tmp12
tmp385 = tmp383 + tmp384
tmp386 = tmp0 - tmp385
tmp387 = triton_helpers.maximum(tmp386, tmp34)
tmp388 = tmp1 - tmp385
tmp389 = triton_helpers.maximum(tmp388, tmp34)
tmp390 = tmp387 + tmp389
tmp391 = tmp3 - tmp385
tmp392 = triton_helpers.maximum(tmp391, tmp34)
tmp393 = tmp390 + tmp392
tmp394 = tmp5 - tmp385
tmp395 = triton_helpers.maximum(tmp394, tmp34)
tmp396 = tmp393 + tmp395
tmp397 = tmp396 - tmp9
tmp398 = tmp397 * tmp57
tmp399 = tmp398 >= tmp34
tmp400 = tl.where(tmp399, tmp385, tmp383)
tmp401 = tmp384 * tmp12
tmp402 = tmp400 + tmp401
tmp403 = tmp0 - tmp402
tmp404 = triton_helpers.maximum(tmp403, tmp34)
tmp405 = tmp1 - tmp402
tmp406 = triton_helpers.maximum(tmp405, tmp34)
tmp407 = tmp404 + tmp406
tmp408 = tmp3 - tmp402
tmp409 = triton_helpers.maximum(tmp408, tmp34)
tmp410 = tmp407 + tmp409
tmp411 = tmp5 - tmp402
tmp412 = triton_helpers.maximum(tmp411, tmp34)
tmp413 = tmp410 + tmp412
tmp414 = tmp413 - tmp9
tmp415 = tmp414 * tmp57
tmp416 = tmp415 >= tmp34
tmp417 = tl.where(tmp416, tmp402, tmp400)
tmp418 = tmp401 * tmp12
tmp419 = tmp417 + tmp418
tmp420 = tmp0 - tmp419
tmp421 = triton_helpers.maximum(tmp420, tmp34)
tmp422 = tmp1 - tmp419
tmp423 = triton_helpers.maximum(tmp422, tmp34)
tmp424 = tmp421 + tmp423
tmp425 = tmp3 - tmp419
tmp426 = triton_helpers.maximum(tmp425, tmp34)
tmp427 = tmp424 + tmp426
tmp428 = tmp5 - tmp419
tmp429 = triton_helpers.maximum(tmp428, tmp34)
tmp430 = tmp427 + tmp429
tmp431 = tmp430 - tmp9
tmp432 = tmp431 * tmp57
tmp433 = tmp432 >= tmp34
tmp434 = tl.where(tmp433, tmp419, tmp417)
tl.store(out_ptr0 + x0, tmp30, xmask)
tl.store(in_out_ptr16 + x0, tmp434, xmask)
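# Kernels 2-9 alternate: a 256-element pass materializes the shifted
# scores x - tau_m for the next midpoint (clamped here or in the reducer),
# and a 64-element pass sums them into f(tau_m) and advances tau_lo
# whenever f(tau_m) * f(tau_lo) >= 0.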
@triton.jit
def triton_poi_fused_add_clamp_div_sub_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp1 + tmp26
tmp28 = tmp0 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + x0, xmask)
tmp33 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp32 + tmp57
tmp59 = tl.where(tmp31, tmp58, tmp32)
tl.store(in_out_ptr0 + x0, tmp59, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_sub_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp1 + tmp27
tmp29 = tmp0 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_out_ptr0 + x0, xmask)
tmp33 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp11 = triton_helpers.maximum(tmp9, tmp10)
tmp13 = triton_helpers.maximum(tmp11, tmp12)
tmp15 = triton_helpers.maximum(tmp13, tmp14)
tmp16 = tmp15 - tmp7
tmp17 = tmp9 - tmp16
tmp18 = 0.0
tmp19 = triton_helpers.maximum(tmp17, tmp18)
tmp20 = tmp10 - tmp16
tmp21 = triton_helpers.maximum(tmp20, tmp18)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp16
tmp24 = triton_helpers.maximum(tmp23, tmp18)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp16
tmp27 = triton_helpers.maximum(tmp26, tmp18)
tmp28 = tmp25 + tmp27
tmp29 = tmp28 - tmp7
tmp30 = tmp8 * tmp29
tmp31 = tmp30 >= tmp18
tmp34 = 0.5
tmp35 = tmp33 * tmp34
tmp36 = tmp35 * tmp34
tmp37 = tmp36 * tmp34
tmp38 = tmp37 * tmp34
tmp39 = tmp38 * tmp34
tmp40 = tmp39 * tmp34
tmp41 = tmp40 * tmp34
tmp42 = tmp41 * tmp34
tmp43 = tmp42 * tmp34
tmp44 = tmp43 * tmp34
tmp45 = tmp44 * tmp34
tmp46 = tmp45 * tmp34
tmp47 = tmp46 * tmp34
tmp48 = tmp47 * tmp34
tmp49 = tmp48 * tmp34
tmp50 = tmp49 * tmp34
tmp51 = tmp50 * tmp34
tmp52 = tmp51 * tmp34
tmp53 = tmp52 * tmp34
tmp54 = tmp53 * tmp34
tmp55 = tmp54 * tmp34
tmp56 = tmp55 * tmp34
tmp57 = tmp56 * tmp34
tmp58 = tmp57 * tmp34
tmp59 = tmp32 + tmp58
tmp60 = tl.where(tmp31, tmp59, tmp32)
tl.store(in_out_ptr0 + x0, tmp60, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_6(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp1 + tmp28
tmp30 = tmp0 - tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + x0, xmask)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp36 + tmp63
tmp65 = tl.where(tmp35, tmp64, tmp36)
tl.store(in_out_ptr0 + x0, tmp65, xmask)
@triton.jit
def triton_poi_fused_add_div_sub_8(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp6 * tmp3
tmp8 = tmp7 * tmp3
tmp9 = tmp8 * tmp3
tmp10 = tmp9 * tmp3
tmp11 = tmp10 * tmp3
tmp12 = tmp11 * tmp3
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp3
tmp15 = tmp14 * tmp3
tmp16 = tmp15 * tmp3
tmp17 = tmp16 * tmp3
tmp18 = tmp17 * tmp3
tmp19 = tmp18 * tmp3
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp3
tmp22 = tmp21 * tmp3
tmp23 = tmp22 * tmp3
tmp24 = tmp23 * tmp3
tmp25 = tmp24 * tmp3
tmp26 = tmp25 * tmp3
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp3
tmp29 = tmp28 * tmp3
tmp30 = tmp1 + tmp29
tmp31 = tmp0 - tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9(in_out_ptr0,
in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, out_ptr4, out_ptr7, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp17 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_out_ptr0 + x0, xmask)
tmp37 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = triton_helpers.maximum(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp16 = triton_helpers.maximum(tmp14, tmp15)
tmp18 = triton_helpers.maximum(tmp16, tmp17)
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = tmp20 - tmp12
tmp22 = tmp14 - tmp21
tmp23 = triton_helpers.maximum(tmp22, tmp1)
tmp24 = tmp15 - tmp21
tmp25 = triton_helpers.maximum(tmp24, tmp1)
tmp26 = tmp23 + tmp25
tmp27 = tmp17 - tmp21
tmp28 = triton_helpers.maximum(tmp27, tmp1)
tmp29 = tmp26 + tmp28
tmp30 = tmp19 - tmp21
tmp31 = triton_helpers.maximum(tmp30, tmp1)
tmp32 = tmp29 + tmp31
tmp33 = tmp32 - tmp12
tmp34 = tmp13 * tmp33
tmp35 = tmp34 >= tmp1
tmp38 = 0.5
tmp39 = tmp37 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp38
tmp42 = tmp41 * tmp38
tmp43 = tmp42 * tmp38
tmp44 = tmp43 * tmp38
tmp45 = tmp44 * tmp38
tmp46 = tmp45 * tmp38
tmp47 = tmp46 * tmp38
tmp48 = tmp47 * tmp38
tmp49 = tmp48 * tmp38
tmp50 = tmp49 * tmp38
tmp51 = tmp50 * tmp38
tmp52 = tmp51 * tmp38
tmp53 = tmp52 * tmp38
tmp54 = tmp53 * tmp38
tmp55 = tmp54 * tmp38
tmp56 = tmp55 * tmp38
tmp57 = tmp56 * tmp38
tmp58 = tmp57 * tmp38
tmp59 = tmp58 * tmp38
tmp60 = tmp59 * tmp38
tmp61 = tmp60 * tmp38
tmp62 = tmp61 * tmp38
tmp63 = tmp62 * tmp38
tmp64 = tmp63 * tmp38
tmp65 = tmp36 + tmp64
tmp66 = tl.where(tmp35, tmp65, tmp36)
tmp67 = tmp64 * tmp38
tmp68 = tmp66 + tmp67
tmp69 = tmp14 - tmp68
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = tmp15 - tmp68
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = tmp70 + tmp72
tmp74 = tmp17 - tmp68
tmp75 = triton_helpers.maximum(tmp74, tmp1)
tmp76 = tmp73 + tmp75
tmp77 = tmp19 - tmp68
tmp78 = triton_helpers.maximum(tmp77, tmp1)
tmp79 = tmp76 + tmp78
tmp80 = tmp79 - tmp12
tmp81 = tmp80 * tmp33
tmp82 = tmp81 >= tmp1
tmp83 = tl.where(tmp82, tmp68, tmp66)
tmp84 = tmp67 * tmp38
tmp85 = tmp83 + tmp84
tmp86 = tmp14 - tmp85
tmp87 = triton_helpers.maximum(tmp86, tmp1)
tmp88 = tmp15 - tmp85
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = tmp87 + tmp89
tmp91 = tmp17 - tmp85
tmp92 = triton_helpers.maximum(tmp91, tmp1)
tmp93 = tmp90 + tmp92
tmp94 = tmp19 - tmp85
tmp95 = triton_helpers.maximum(tmp94, tmp1)
tmp96 = tmp93 + tmp95
tmp97 = tmp96 - tmp12
tmp98 = tmp97 * tmp33
tmp99 = tmp98 >= tmp1
tmp100 = tl.where(tmp99, tmp85, tmp83)
tmp101 = tmp84 * tmp38
tmp102 = tmp100 + tmp101
tmp103 = tmp14 - tmp102
tmp104 = triton_helpers.maximum(tmp103, tmp1)
tmp105 = tmp15 - tmp102
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = tmp104 + tmp106
tmp108 = tmp17 - tmp102
tmp109 = triton_helpers.maximum(tmp108, tmp1)
tmp110 = tmp107 + tmp109
tmp111 = tmp19 - tmp102
tmp112 = triton_helpers.maximum(tmp111, tmp1)
tmp113 = tmp110 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp33
tmp116 = tmp115 >= tmp1
tmp117 = tl.where(tmp116, tmp102, tmp100)
tmp118 = tmp101 * tmp38
tmp119 = tmp117 + tmp118
tmp120 = tmp14 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp1)
tmp122 = tmp15 - tmp119
tmp123 = triton_helpers.maximum(tmp122, tmp1)
tmp124 = tmp121 + tmp123
tmp125 = tmp17 - tmp119
tmp126 = triton_helpers.maximum(tmp125, tmp1)
tmp127 = tmp124 + tmp126
tmp128 = tmp19 - tmp119
tmp129 = triton_helpers.maximum(tmp128, tmp1)
tmp130 = tmp127 + tmp129
tmp131 = tmp130 - tmp12
tmp132 = tmp131 * tmp33
tmp133 = tmp132 >= tmp1
tmp134 = tl.where(tmp133, tmp119, tmp117)
tmp135 = tmp118 * tmp38
tmp136 = tmp134 + tmp135
tmp137 = tmp14 - tmp136
tmp138 = triton_helpers.maximum(tmp137, tmp1)
tmp139 = tmp15 - tmp136
tmp140 = triton_helpers.maximum(tmp139, tmp1)
tmp141 = tmp138 + tmp140
tmp142 = tmp17 - tmp136
tmp143 = triton_helpers.maximum(tmp142, tmp1)
tmp144 = tmp141 + tmp143
tmp145 = tmp19 - tmp136
tmp146 = triton_helpers.maximum(tmp145, tmp1)
tmp147 = tmp144 + tmp146
tmp148 = tmp147 - tmp12
tmp149 = tmp148 * tmp33
tmp150 = tmp149 >= tmp1
tmp151 = tl.where(tmp150, tmp136, tmp134)
tmp152 = tmp135 * tmp38
tmp153 = tmp151 + tmp152
tmp154 = tmp14 - tmp153
tmp155 = triton_helpers.maximum(tmp154, tmp1)
tmp156 = tmp15 - tmp153
tmp157 = triton_helpers.maximum(tmp156, tmp1)
tmp158 = tmp155 + tmp157
tmp159 = tmp17 - tmp153
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = tmp158 + tmp160
tmp162 = tmp19 - tmp153
tmp163 = triton_helpers.maximum(tmp162, tmp1)
tmp164 = tmp161 + tmp163
tl.store(out_ptr4 + x0, tmp101, xmask)
tl.store(in_out_ptr2 + x0, tmp151, xmask)
tl.store(out_ptr7 + x0, tmp164, xmask)
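# Kernel 10: the final 256-element pass rebuilds the last midpoint tau_m
# from the tau_lo and dm emitted by kernel 9, clamps x - tau_m at zero,
# and divides by the row sums in in_ptr3 (the ensure_sum_one step).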
@triton.jit
def triton_poi_fused_add_clamp_div_sub_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = 0.5
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp3
tmp6 = tmp5 * tmp3
tmp7 = tmp1 + tmp6
tmp8 = tmp0 - tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp12 = tmp10 / tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf40 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf40
get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_0[grid(64)](buf41,
arg0_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf81 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf82 = reinterpret_tensor(buf81, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf81
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_1[grid(64)](buf82,
arg0_1, buf41, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf83 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_sub_2[grid(256)](arg0_1, buf82,
buf42, buf83, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf85 = buf82
del buf82
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_3[grid(64)](buf85,
buf83, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf86 = buf83
del buf83
triton_poi_fused_add_clamp_div_sub_4[grid(256)](arg0_1, buf85,
buf42, buf86, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf88 = buf85
del buf85
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_5[grid(64)](buf88,
buf86, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf89 = buf86
del buf86
triton_poi_fused_add_div_sub_6[grid(256)](arg0_1, buf88, buf42,
buf89, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf91 = buf88
del buf88
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_7[grid(64)](buf91,
buf89, arg0_1, buf42, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf92 = buf89
del buf89
triton_poi_fused_add_div_sub_8[grid(256)](arg0_1, buf91, buf42,
buf92, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf94 = buf91
del buf91
buf100 = buf41
del buf41
buf103 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf104 = reinterpret_tensor(buf103, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf103
buf105 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_max_mul_sub_sum_where_9[grid(64)](buf94,
buf104, buf92, arg0_1, buf42, buf100, buf105, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf42
del buf94
buf106 = buf92
del buf92
triton_poi_fused_add_clamp_div_sub_10[grid(256)](arg0_1, buf104,
buf100, buf105, buf106, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del buf100
del buf104
del buf105
return buf106,
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
ensure_sum_one : bool
Whether to divide the result by its sum. If false, the result might
sum to a value close to, but not exactly, 1, which may cause downstream problems.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
            ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
return dX, None, None, None
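# Hedged sanity check (illustrative only, not part of the original module):
# the closed-form backward above can be compared against autograd's numerical
# Jacobian; torch.autograd.gradcheck requires float64 inputs.
def _check_sparsemax_bisect_grad():
    X = torch.randn(3, 5, dtype=torch.double, requires_grad=True)
    return torch.autograd.gradcheck(
        lambda t: SparsemaxBisectFunction.apply(t, -1, 50, True), (X,))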
class SparsemaxBisectNew(nn.Module):
def __init__(self, dim=-1, n_iter=None):
"""sparsemax: normalizing sparse transform (a la softmax) via bisection
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
"""
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| roholazandie/entmax | SparsemaxBisect | false | 7,626 | [
"MIT"
] | 1 | 657374e6a792ec6840b6f78bc759cc1f51570aad | https://github.com/roholazandie/entmax/tree/657374e6a792ec6840b6f78bc759cc1f51570aad | from torch.autograd import Function
import torch
import torch.nn as nn
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):
"""sparsemax: normalizing sparse transform (a la softmax), via bisection.
Solves the projection:
min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.
Parameters
----------
X : torch.Tensor
The input tensor.
dim : int
The dimension along which to apply sparsemax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If false, the result might
        sum to a value close to, but not exactly, 1, which can cause
        downstream problems.
Note: This function does not yet support normalizing along anything except
the last dimension. Please use transposing and views to achieve more
general behavior.
Returns
-------
    P : torch.Tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 for every slice
        along dim.
"""
return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True
):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class SparsemaxBisectFunction(EntmaxBisectFunction):
@classmethod
def _gp(cls, x, alpha):
return x
@classmethod
def _gp_inv(cls, y, alpha):
return y
@classmethod
def _p(cls, x, alpha):
return torch.clamp(x, min=0)
@classmethod
def forward(cls, ctx, X, dim=-1, n_iter=50, ensure_sum_one=True):
        return super().forward(ctx, X, alpha=2, dim=dim, n_iter=n_iter,
            ensure_sum_one=ensure_sum_one)
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = Y > 0
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
        return dX, None, None, None
# ... truncated (>4000 chars) for memory efficiency |
GPT2Layer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/wd/cwdz7kqs3uwyg53zsyekt77eye7yjl6v7vulow2q6ni534mkf6zw.py
# Topologically Sorted Source Nodes: [src1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/vs/cvsfvbs4wlaqvwxm3svg65dnhcq336ptudvn6xetnbnrtzj7xssn.py
# Topologically Sorted Source Nodes: [src1], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_3, [2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_3, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_2), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7i/c7iv2wtnh2crmgpbosmgkserqe5gtald6a35rfywer4k4h3ifw5k.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_weights => clone_3
# Graph fragment:
# %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ot/cotwkdmtynl52fs6cspxxggpcx3cbwefaeojzv7cx6urigofvn4r.py
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_weights => clone_4
# Graph fragment:
# %clone_4 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fx/cfxmabl7vb2dtcjtqswabcgzm57j7prwh2yo42iipcmfrbhhfspk.py
# Topologically Sorted Source Nodes: [attn_weights_1, mul_1, sub, mul_2, attn_weights_2, attn_weights_3], Original ATen: [aten.mul, aten.sub, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_weights_1 => mul_2
# attn_weights_2 => add_2
# attn_weights_3 => amax, exp, sub_2, sum_1
# mul_1 => mul_3
# mul_2 => mul_4
# sub => sub_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %unsqueeze), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 10000.0), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add_2, [-1], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_add_mul_sub_4 = async_compile.triton('triton_poi_fused__softmax_add_mul_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_sub_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (4*x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + ((4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (1 + (4*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x3)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (2 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (3 + (4*x3)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (3 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp3 - tmp1
tmp6 = 10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp4 + tmp7
tmp10 = tmp9 * tmp1
tmp12 = tmp10 * tmp11
tmp13 = tmp11 - tmp1
tmp14 = tmp13 * tmp6
tmp15 = tmp12 + tmp14
tmp16 = triton_helpers.maximum(tmp8, tmp15)
tmp18 = tmp17 * tmp1
tmp20 = tmp18 * tmp19
tmp21 = tmp19 - tmp1
tmp22 = tmp21 * tmp6
tmp23 = tmp20 + tmp22
tmp24 = triton_helpers.maximum(tmp16, tmp23)
tmp26 = tmp25 * tmp1
tmp28 = tmp26 * tmp27
tmp29 = tmp27 - tmp1
tmp30 = tmp29 * tmp6
tmp31 = tmp28 + tmp30
tmp32 = triton_helpers.maximum(tmp24, tmp31)
tmp33 = tmp8 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp15 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp23 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp31 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tl.store(out_ptr0 + (x3), tmp32, xmask)
tl.store(out_ptr1 + (x3), tmp43, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/jj/cjjcmtldcmcylztcijaojnwobhir2p5nmxev37wgvputqrpji3lv.py
# Topologically Sorted Source Nodes: [attn_weights_1, mul_1, sub, mul_2, attn_weights_2, attn_weights_3], Original ATen: [aten.mul, aten.sub, aten.add, aten._softmax]
# Source node to ATen node mapping:
# attn_weights_1 => mul_2
# attn_weights_2 => add_2
# attn_weights_3 => div, exp, sub_2
# mul_1 => mul_3
# mul_2 => mul_4
# sub => sub_1
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %unsqueeze), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, 10000.0), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_add_mul_sub_5 = async_compile.triton('triton_poi_fused__softmax_add_mul_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_mul_sub_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_mul_sub_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = (xindex // 64)
x5 = xindex % 16
x6 = (xindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp3 = tl.load(in_ptr0 + (x5 + (16*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x6), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp3 - tmp1
tmp6 = 10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp4 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tl.store(in_out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
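# Note: the two kernels above implement a numerically stable masked softmax
# in two passes over the last axis: _4 reduces each row of masked logits to
# its max and the sum of exp(x - max), while _5 recomputes the masked logits
# and normalizes in place.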
# kernel path: runs/run_shard_4/inductor_cache/wo/cwobf6yh3xzgoa2hzjbquwp4ucfqunitxzc3sam7tw5th75pmcnt.py
# Topologically Sorted Source Nodes: [attn_output], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_output => clone_6
# Graph fragment:
# %clone_6 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_6 = async_compile.triton('triton_poi_fused_clone_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + (12*x2) + (48*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lw/clwfsjrjxeb2gmxy5p3lplvcrvrn37iuw4atjria32bxp2jajrtc.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_7
# Graph fragment:
# %clone_7 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_7 = async_compile.triton('triton_poi_fused_clone_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/xw/cxwztfhiso3uego7k7zbqdm4uahezg4u5zy7ri26p3g76e2lbxyf.py
# Topologically Sorted Source Nodes: [src, src1_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_3
# src1_1 => var_mean_1
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_13), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/so/csozgpoz3hmv3hrlzcentzlzvi4jqhlqmcwqydcltpnthhqeurgb.py
# Topologically Sorted Source Nodes: [src, src1_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add_3
# src1_1 => add_4, add_5, mul_5, mul_6, rsqrt_1, sub_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_13), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_5, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_6), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %primals_9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_10), kwargs = {})
triton_poi_fused_add_native_layer_norm_9 = async_compile.triton('triton_poi_fused_add_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5t/c5trxoio6pkvidg4a47vgy7nkm237e45vf22l4wwn3rfbee6ihp3.py
# Topologically Sorted Source Nodes: [src1_2], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# src1_2 => add_6, erf, mul_7, mul_8, mul_9
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, 0.5), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_8,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, %add_6), kwargs = {})
triton_poi_fused_gelu_10 = async_compile.triton('triton_poi_fused_gelu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
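# Note: this is the exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))),
# i.e. F.gelu's default form rather than the tanh approximation.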
# kernel path: runs/run_shard_4/inductor_cache/hn/chnmuz2t2wr4y3ok7llgubdm3oh3p5vjxsah26e3wmtffvr6khii.py
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src => add_3
# src_1 => add_7
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_13), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_17), kwargs = {})
triton_poi_fused_add_11 = async_compile.triton('triton_poi_fused_add_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [src1], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_3, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src1], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_3, buf0, buf1, primals_1, primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf3, primals_6, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf3, primals_6, buf5, 16, 4, grid=grid(16, 4), stream=stream0)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights_1, mul_1, sub, mul_2, attn_weights_2, attn_weights_3], Original ATen: [aten.mul, aten.sub, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_sub_4.run(buf6, primals_4, buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [attn_weights_1, mul_1, sub, mul_2, attn_weights_2, attn_weights_3], Original ATen: [aten.mul, aten.sub, aten.add, aten._softmax]
triton_poi_fused__softmax_add_mul_sub_5.run(buf9, primals_4, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [attn_output], Original ATen: [aten.clone]
triton_poi_fused_clone_6.run(buf3, primals_6, buf10, 16, 4, grid=grid(16, 4), stream=stream0)
del buf3
del primals_6
buf11 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [attn_output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_7.run(buf11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [attn_output_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf13)
del primals_8
buf14 = buf1; del buf1 # reuse
buf15 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [src, src1_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(primals_3, buf13, buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src, src1_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_9.run(primals_3, buf13, buf14, buf15, primals_9, primals_10, buf16, 64, grid=grid(64), stream=stream0)
del buf14
del buf15
del primals_10
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_12, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17)
del primals_12
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src1_2], Original ATen: [aten.gelu]
triton_poi_fused_gelu_10.run(buf17, buf18, 64, grid=grid(64), stream=stream0)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [src, src_1], Original ATen: [aten.add]
triton_poi_fused_add_11.run(buf20, primals_3, buf13, primals_14, 64, grid=grid(64), stream=stream0)
del primals_14
return (buf20, primals_3, primals_4, primals_9, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), primals_13, primals_11, primals_7, reinterpret_tensor(buf10, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadSelfAttention(nn.Module):
def __init__(self, d_ipt: 'int', n_head: 'int', dropout_p: 'float'=0.1):
super(MultiHeadSelfAttention, self).__init__()
self.qkv_linear = nn.Linear(d_ipt, d_ipt * 3, True)
self.n_head = n_head
self.output_linear = nn.Linear(d_ipt, d_ipt, True)
self.dropout = nn.Dropout(dropout_p)
def forward(self, src: 'torch.FloatTensor', attn_mask: 'torch.FloatTensor'
) ->torch.FloatTensor:
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if attn_mask.dim() == 3:
attn_mask = attn_mask.unsqueeze(1)
q, k, v = self.qkv_linear(src).chunk(3, dim=-1)
q = q.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 1, 3)
k = k.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 3, 1)
v = v.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 1, 3)
attn_weights = torch.matmul(q, k)
attn_weights = attn_weights * float(src.shape[2] // self.n_head
) ** -0.5
attn_weights = attn_weights * attn_mask + (attn_mask - 1) * 10000.0
attn_weights = F.softmax(attn_weights, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, v)
attn_output = attn_output.permute(0, 2, 1, 3).contiguous().view(src
.shape)
attn_output = self.output_linear(attn_output)
return attn_output
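# Shape walkthrough for MultiHeadSelfAttention.forward (batch B, length L,
# model dim D, heads H, head dim Dh = D // H):
#   qkv_linear(src): (B, L, 3D) -> chunk -> q, k, v, each (B, L, D)
#   q: (B, H, L, Dh), k: (B, H, Dh, L), v: (B, H, L, Dh)
#   q @ k: (B, H, L, L), scaled by Dh ** -0.5; the mask keeps logits where
#   attn_mask == 1 and shifts masked logits down by 10000 before the softmax
#   attn @ v: (B, H, L, Dh) -> permute/reshape -> (B, L, D) -> output_linear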
class GPT2Layer(nn.Module):
def __init__(self, config: 'GPT2Config'):
super(GPT2Layer, self).__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.self_attn = MultiHeadSelfAttention(d_ipt=config.hidden_size,
n_head=config.n_head, dropout_p=config.drop_out)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
self.intermediate_linear1 = nn.Linear(config.hidden_size, config.
d_intermediate, True)
self.intermediate_linear2 = nn.Linear(config.d_intermediate, config
.hidden_size, True)
self.dropout = nn.Dropout(config.drop_out)
self.dropout1 = nn.Dropout(config.drop_out)
self.dropout2 = nn.Dropout(config.drop_out)
def forward(self, src: 'torch.FloatTensor', src_mask: 'torch.FloatTensor'
) ->torch.FloatTensor:
src1 = self.layer_norm1(src)
src1 = self.self_attn(src1, src_mask)
src = src + self.dropout1(src1)
src1 = self.layer_norm2(src)
src1 = F.gelu(self.intermediate_linear1(src1))
src1 = self.intermediate_linear2(src1)
src1 = self.dropout(src1)
src = src + src1
return src
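# Minimal usage sketch (illustrative shapes; assumes the _mock_config
# imported above behaves like the real GPT2Config):
def _gpt2_layer_example():
    config = _mock_config(hidden_size=4, n_head=4, drop_out=0.5,
        d_intermediate=4)
    layer = GPT2Layer(config).eval()  # eval() disables the dropout layers
    src = torch.rand(4, 4, 4)  # (batch, seq_len, hidden_size)
    mask = torch.ones(4, 4)  # 2-D mask: 1 = attend, 0 = masked out
    return layer(src, mask)  # -> (4, 4, 4)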
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, n_head=4, drop_out=
0.5, d_intermediate=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
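# The first kernel below fuses LayerNorm's statistics pass: for each of the
# 16 rows of the (4, 4, 4) input it writes the row mean and
# rsqrt(variance + 1e-05).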
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
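# LayerNorm application: normalizes each element with the precomputed mean
# and reciprocal standard deviation, then applies the affine weight and bias.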
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
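# Bias-add plus head-split copy for the Q slice of the fused QKV GEMM output
# (stride 12 per token, offset 0); triton_poi_fused_clone_3 and
# triton_poi_fused_clone_6 below do the same for the K slice (offset 4,
# stored pre-transposed) and the V slice (offset 8).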
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
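# First half of the masked softmax: applies the scale (1.0 here, since
# d_head == 1) and the additive mask, then reduces each length-4 row to its
# maximum and its sum of shifted exponentials for numerical stability.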
@triton.jit
def triton_poi_fused__softmax_add_mul_sub_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + 4 * x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4 * x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (1 + 4 * x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (2 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (3 + 4 * x3), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr1 + (3 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp3 - tmp1
tmp6 = 10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp4 + tmp7
tmp10 = tmp9 * tmp1
tmp12 = tmp10 * tmp11
tmp13 = tmp11 - tmp1
tmp14 = tmp13 * tmp6
tmp15 = tmp12 + tmp14
tmp16 = triton_helpers.maximum(tmp8, tmp15)
tmp18 = tmp17 * tmp1
tmp20 = tmp18 * tmp19
tmp21 = tmp19 - tmp1
tmp22 = tmp21 * tmp6
tmp23 = tmp20 + tmp22
tmp24 = triton_helpers.maximum(tmp16, tmp23)
tmp26 = tmp25 * tmp1
tmp28 = tmp26 * tmp27
tmp29 = tmp27 - tmp1
tmp30 = tmp29 * tmp6
tmp31 = tmp28 + tmp30
tmp32 = triton_helpers.maximum(tmp24, tmp31)
tmp33 = tmp8 - tmp32
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp15 - tmp32
tmp36 = tl_math.exp(tmp35)
tmp37 = tmp34 + tmp36
tmp38 = tmp23 - tmp32
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp37 + tmp39
tmp41 = tmp31 - tmp32
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tl.store(out_ptr0 + x3, tmp32, xmask)
tl.store(out_ptr1 + x3, tmp43, xmask)
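# Second half of the masked softmax: recomputes the masked score, subtracts
# the row maximum, exponentiates, and divides by the row sum in place.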
@triton.jit
def triton_poi_fused__softmax_add_mul_sub_5(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex // 64
x5 = xindex % 16
x6 = xindex // 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp3 = tl.load(in_ptr0 + (x5 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + x6, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp3 - tmp1
tmp6 = 10000.0
tmp7 = tmp5 * tmp6
tmp8 = tmp4 + tmp7
tmp10 = tmp8 - tmp9
tmp11 = tl_math.exp(tmp10)
tmp13 = tmp11 / tmp12
tl.store(in_out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_clone_6(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
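# Fused residual add + LayerNorm statistics for the second LayerNorm: sums
# the attention output into the input stream and stores each row's mean and
# variance (the rsqrt is taken in the next kernel).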
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
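# Exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))).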
@triton.jit
def triton_poi_fused_gelu_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
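# Final fused add: applies the second linear layer's bias and accumulates
# both residual branches (input + attention output + MLP output) in one pass.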
@triton.jit
def triton_poi_fused_add_11(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (12, 4), (4, 1))
assert_size_stride(primals_6, (12,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4, 4), (4, 1))
assert_size_stride(primals_12, (4,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
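    # The staged buffers below mirror GPT2Layer.forward: LayerNorm -> fused
    # QKV projection -> masked softmax attention -> output projection ->
    # residual add -> LayerNorm -> GELU MLP -> final residual add.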
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf1 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(16)](primals_3, buf0,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](primals_3, buf0,
buf1, primals_1, primals_2, buf2, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_1
del primals_2
buf3 = empty_strided_cuda((16, 12), (12, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 12), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(16, 4)](buf3, primals_6, buf4, 16, 4,
XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(16, 4)](buf3, primals_6, buf5, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf5, (16, 1, 4), (4, 0, 1), 0), out=buf6)
buf7 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused__softmax_add_mul_sub_4[grid(64)](buf6, primals_4,
buf7, buf8, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused__softmax_add_mul_sub_5[grid(256)](buf9, primals_4,
buf7, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_6[grid(16, 4)](buf3, primals_6, buf10, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
del buf3
del primals_6
buf11 = reinterpret_tensor(buf7, (16, 4, 1), (4, 1, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (16, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
triton_poi_fused_clone_7[grid(16, 4)](buf11, buf12, 16, 4, XBLOCK=4,
YBLOCK=16, num_warps=1, num_stages=1)
buf13 = reinterpret_tensor(buf11, (16, 4), (4, 1), 0)
del buf11
extern_kernels.addmm(primals_8, reinterpret_tensor(buf12, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf13)
del primals_8
buf14 = buf1
del buf1
buf15 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_8[grid(16)](primals_3, buf13,
buf14, buf15, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_9[grid(64)](primals_3, buf13,
buf14, buf15, primals_9, primals_10, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf14
del buf15
del primals_10
buf17 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_12, reinterpret_tensor(buf16, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf17)
del primals_12
buf18 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_gelu_10[grid(64)](buf17, buf18, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
triton_poi_fused_add_11[grid(64)](buf20, primals_3, buf13,
primals_14, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_14
return buf20, primals_3, primals_4, primals_9, reinterpret_tensor(buf2,
(16, 4), (4, 1), 0), buf9, reinterpret_tensor(buf12, (16, 4), (4, 1), 0
), buf13, reinterpret_tensor(buf16, (16, 4), (4, 1), 0
), buf17, reinterpret_tensor(buf18, (16, 4), (4, 1), 0
), primals_13, primals_11, primals_7, reinterpret_tensor(buf10, (16,
1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf5, (16, 4, 1), (4, 1, 4), 0), primals_5
class MultiHeadSelfAttention(nn.Module):
def __init__(self, d_ipt: 'int', n_head: 'int', dropout_p: 'float'=0.1):
super(MultiHeadSelfAttention, self).__init__()
self.qkv_linear = nn.Linear(d_ipt, d_ipt * 3, True)
self.n_head = n_head
self.output_linear = nn.Linear(d_ipt, d_ipt, True)
self.dropout = nn.Dropout(dropout_p)
def forward(self, src: 'torch.FloatTensor', attn_mask: 'torch.FloatTensor'
) ->torch.FloatTensor:
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if attn_mask.dim() == 3:
attn_mask = attn_mask.unsqueeze(1)
q, k, v = self.qkv_linear(src).chunk(3, dim=-1)
q = q.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 1, 3)
k = k.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 3, 1)
v = v.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 1, 3)
attn_weights = torch.matmul(q, k)
        attn_weights = attn_weights * float(
            src.shape[2] // self.n_head) ** -0.5
attn_weights = attn_weights * attn_mask + (attn_mask - 1) * 10000.0
attn_weights = F.softmax(attn_weights, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, v)
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous().view(
            src.shape)
attn_output = self.output_linear(attn_output)
return attn_output
class GPT2LayerNew(nn.Module):
def __init__(self, config: 'GPT2Config'):
super(GPT2LayerNew, self).__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.self_attn = MultiHeadSelfAttention(d_ipt=config.hidden_size,
n_head=config.n_head, dropout_p=config.drop_out)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
        self.intermediate_linear1 = nn.Linear(config.hidden_size,
            config.d_intermediate, True)
        self.intermediate_linear2 = nn.Linear(config.d_intermediate,
            config.hidden_size, True)
self.dropout = nn.Dropout(config.drop_out)
self.dropout1 = nn.Dropout(config.drop_out)
self.dropout2 = nn.Dropout(config.drop_out)
def forward(self, input_0, input_1):
primals_1 = self.layer_norm1.weight
primals_2 = self.layer_norm1.bias
primals_5 = self.self_attn.qkv_linear.weight
primals_6 = self.self_attn.qkv_linear.bias
primals_7 = self.self_attn.output_linear.weight
primals_8 = self.self_attn.output_linear.bias
primals_9 = self.layer_norm2.weight
primals_10 = self.layer_norm2.bias
primals_11 = self.intermediate_linear1.weight
primals_12 = self.intermediate_linear1.bias
primals_13 = self.intermediate_linear2.weight
primals_14 = self.intermediate_linear2.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
| DunZhang/GPT2SourceCode | GPT2Layer | false | 7,627 | [
"MIT"
] | 1 | d598dbae278c93f88469d45ec025da4cfa7d69ee | https://github.com/DunZhang/GPT2SourceCode/tree/d598dbae278c93f88469d45ec025da4cfa7d69ee | from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiHeadSelfAttention(nn.Module):
def __init__(self, d_ipt: 'int', n_head: 'int', dropout_p: 'float'=0.1):
super().__init__()
self.qkv_linear = nn.Linear(d_ipt, d_ipt * 3, True)
self.n_head = n_head
self.output_linear = nn.Linear(d_ipt, d_ipt, True)
self.dropout = nn.Dropout(dropout_p)
def forward(self, src: 'torch.FloatTensor', attn_mask: 'torch.FloatTensor'
) ->torch.FloatTensor:
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if attn_mask.dim() == 3:
attn_mask = attn_mask.unsqueeze(1)
q, k, v = self.qkv_linear(src).chunk(3, dim=-1)
q = q.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 1, 3)
k = k.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 3, 1)
v = v.contiguous().view(src.shape[0], src.shape[1], self.n_head,
src.shape[2] // self.n_head).permute(0, 2, 1, 3)
attn_weights = torch.matmul(q, k)
        attn_weights = attn_weights * float(
            src.shape[2] // self.n_head) ** -0.5
attn_weights = attn_weights * attn_mask + (attn_mask - 1) * 10000.0
attn_weights = F.softmax(attn_weights, dim=-1)
attn_weights = self.dropout(attn_weights)
attn_output = torch.matmul(attn_weights, v)
        attn_output = attn_output.permute(0, 2, 1, 3).contiguous().view(
            src.shape)
attn_output = self.output_linear(attn_output)
return attn_output
class Model(nn.Module):
def __init__(self, config: 'GPT2Config'):
super().__init__()
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.self_attn = MultiHeadSelfAttention(d_ipt=config.hidden_size,
n_head=config.n_head, dropout_p=config.drop_out)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
        self.intermediate_linear1 = nn.Linear(config.hidden_size,
            config.d_intermediate, True)
        self.intermediate_linear2 = nn.Linear(config.d_intermediate,
            config.hidden_size, True)
self.dropout = nn.Dropout(config.drop_out)
self.dropout1 = nn.Dropout(config.drop_out)
self.dropout2 = nn.Dropout(config.drop_out)
def forward(self, src: 'torch.FloatTensor', src_mask: 'torch.FloatTensor'
) ->torch.FloatTensor:
src1 = self.layer_norm1(src)
src1 = self.self_attn(src1, src_mask)
src = src + self.dropout1(src1)
src1 = self.layer_norm2(src)
src1 = F.gelu(self.intermediate_linear1(src1))
src1 = self.intermediate_linear2(src1)
src1 = self.dropout(src1)
src = src + src1
return src
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'config': _mock_config(hidden_size=4, n_head=4, drop_out=
0.5, d_intermediate=4)}]
|
Gaussian | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/x6/cx6jmnk3w4fyizcgdntrll7zx32lso2oe3pzrnaggvqp5atfgroz.py
# Topologically Sorted Source Nodes: [neg, mul, exp], Original ATen: [aten.neg, aten.mul, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# mul => mul
# neg => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %arg0_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
triton_poi_fused_exp_mul_neg_0 = async_compile.triton('triton_poi_fused_exp_mul_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tmp2 = tmp1 * tmp0
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neg, mul, exp], Original ATen: [aten.neg, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_mul_neg_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data
class Gaussian(torch.nn.Module):
"""Gaussian activation"""
def forward(self, x: 'Tensor') ->Tensor:
return torch.exp(-x * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.tensorboard
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
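# Elementwise Gaussian activation: computes exp(-x * x) for every element.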
@triton.jit
def triton_poi_fused_exp_mul_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tmp2 = tmp1 * tmp0
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_exp_mul_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GaussianNew(torch.nn.Module):
"""Gaussian activation"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| raimis/torchani | Gaussian | false | 7,628 | [
"MIT"
] | 1 | 19882c6e18174e08423706a536366f89029a740a | https://github.com/raimis/torchani/tree/19882c6e18174e08423706a536366f89029a740a | import torch
from torch import Tensor
import torch.utils.tensorboard
import torch.utils.data
class Model(torch.nn.Module):
"""Gaussian activation"""
def forward(self, x: 'Tensor') ->Tensor:
return torch.exp(-x * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
EntmaxBisect | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/jl/cjlewkazu5swf57ve2vqouzokbgz2nyjl3wq5slxk6ufxvzdg2y4.py
# Topologically Sorted Source Nodes: [sub, X, max_1, pow_2, tau_hi, pow_1, tau_lo, dm, dm_1, tau_m, sub_9, clamp_1, truediv_1, p_m, sum_2, sub_5, clamp, truediv, pow_3, sum_1, f_lo, tau_lo_1, dm_2, tau_m_1, sub_12, clamp_2, truediv_2, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_15, clamp_3, truediv_3, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_18, clamp_4, truediv_4, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_21, clamp_5, truediv_5, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_24, clamp_6, truediv_6, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_27, clamp_7, truediv_7, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_30, clamp_8, truediv_8, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_33, clamp_9, truediv_9, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_36, clamp_10, truediv_10, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_39, clamp_11, truediv_11, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_42, clamp_12, truediv_12, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_45, clamp_13, truediv_13, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_48, clamp_14, truediv_14, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_51, clamp_15, truediv_15, p_m_14, sum_16, tau_lo_15, dm_16, tau_m_15, sub_54, clamp_16, truediv_16, p_m_15, sum_17, f_m_15, tau_lo_16, dm_17, tau_m_16, sub_57, clamp_17, truediv_17, p_m_16, sum_18, tau_lo_17, dm_18, tau_m_17, sub_60, clamp_18, truediv_18, p_m_17, sum_19, tau_lo_18, dm_19, tau_m_18, sub_63, clamp_19, truediv_19, p_m_18, sum_20, tau_lo_19, dm_20, tau_m_19, sub_66, clamp_20, truediv_20, p_m_19, sum_21, tau_lo_20, dm_21, tau_m_20, sub_69, clamp_21, truediv_21, p_m_20, sum_22, tau_lo_21, dm_22, tau_m_21, sub_72, clamp_22, truediv_22, p_m_21, sum_23, tau_lo_22, dm_23, tau_m_22, sub_75, clamp_23, truediv_23, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_78, clamp_24, truediv_24, p_m_23, sum_25, tau_lo_24, dm_25, tau_m_24, sub_81, clamp_25, truediv_25, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_84, clamp_26, truediv_26, p_m_25, sum_27], Original ATen: [aten.sub, aten.mul, aten.max, aten.pow, aten.div, aten.add, aten.clamp, aten.sum, aten.where]
# Source node to ATen node mapping:
# X => mul
# clamp => clamp_min
# clamp_1 => clamp_min_1
# clamp_10 => clamp_min_10
# clamp_11 => clamp_min_11
# clamp_12 => clamp_min_12
# clamp_13 => clamp_min_13
# clamp_14 => clamp_min_14
# clamp_15 => clamp_min_15
# clamp_16 => clamp_min_16
# clamp_17 => clamp_min_17
# clamp_18 => clamp_min_18
# clamp_19 => clamp_min_19
# clamp_2 => clamp_min_2
# clamp_20 => clamp_min_20
# clamp_21 => clamp_min_21
# clamp_22 => clamp_min_22
# clamp_23 => clamp_min_23
# clamp_24 => clamp_min_24
# clamp_25 => clamp_min_25
# clamp_26 => clamp_min_26
# clamp_3 => clamp_min_3
# clamp_4 => clamp_min_4
# clamp_5 => clamp_min_5
# clamp_6 => clamp_min_6
# clamp_7 => clamp_min_7
# clamp_8 => clamp_min_8
# clamp_9 => clamp_min_9
# dm => sub_8
# dm_1 => div
# dm_10 => div_9
# dm_11 => div_10
# dm_12 => div_11
# dm_13 => div_12
# dm_14 => div_13
# dm_15 => div_14
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_2 => div_1
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_3 => div_2
# dm_4 => div_3
# dm_5 => div_4
# dm_6 => div_5
# dm_7 => div_6
# dm_8 => div_7
# dm_9 => div_8
# f_lo => sub_7
# f_m_1 => sub_14
# f_m_10 => sub_41
# f_m_11 => sub_44
# f_m_12 => sub_47
# f_m_13 => sub_50
# f_m_15 => sub_56
# f_m_2 => sub_17
# f_m_3 => sub_20
# f_m_4 => sub_23
# f_m_5 => sub_26
# f_m_6 => sub_29
# f_m_7 => sub_32
# f_m_8 => sub_35
# f_m_9 => sub_38
# max_1 => max_1
# mul_10 => mul_21
# mul_11 => mul_23
# mul_12 => mul_25
# mul_13 => mul_27
# mul_14 => mul_29
# mul_2 => mul_5
# mul_3 => mul_7
# mul_4 => mul_9
# mul_5 => mul_11
# mul_6 => mul_13
# mul_7 => mul_15
# mul_8 => mul_17
# mul_9 => mul_19
# p_m => pow_4
# p_m_1 => pow_5
# p_m_10 => pow_14
# p_m_11 => pow_15
# p_m_12 => pow_16
# p_m_13 => pow_17
# p_m_14 => pow_18
# p_m_15 => pow_19
# p_m_16 => pow_20
# p_m_17 => pow_21
# p_m_18 => pow_22
# p_m_19 => pow_23
# p_m_2 => pow_6
# p_m_20 => pow_24
# p_m_21 => pow_25
# p_m_22 => pow_26
# p_m_23 => pow_27
# p_m_24 => pow_28
# p_m_25 => pow_29
# p_m_3 => pow_7
# p_m_4 => pow_8
# p_m_5 => pow_9
# p_m_6 => pow_10
# p_m_7 => pow_11
# p_m_8 => pow_12
# p_m_9 => pow_13
# pow_1 => full_default_1
# pow_2 => full_default_2
# pow_3 => pow_3
# sub => full_default
# sub_12 => sub_12
# sub_15 => sub_15
# sub_18 => sub_18
# sub_21 => sub_21
# sub_24 => sub_24
# sub_27 => sub_27
# sub_30 => sub_30
# sub_33 => sub_33
# sub_36 => sub_36
# sub_39 => sub_39
# sub_42 => sub_42
# sub_45 => sub_45
# sub_48 => sub_48
# sub_5 => sub_5
# sub_51 => sub_51
# sub_54 => sub_54
# sub_57 => sub_57
# sub_60 => sub_60
# sub_63 => sub_63
# sub_66 => sub_66
# sub_69 => sub_69
# sub_72 => sub_72
# sub_75 => sub_75
# sub_78 => sub_78
# sub_81 => sub_81
# sub_84 => sub_84
# sub_9 => sub_9
# sum_1 => sum_1
# sum_10 => sum_10
# sum_11 => sum_11
# sum_12 => sum_12
# sum_13 => sum_13
# sum_14 => sum_14
# sum_15 => sum_15
# sum_16 => sum_16
# sum_17 => sum_17
# sum_18 => sum_18
# sum_19 => sum_19
# sum_2 => sum_2
# sum_20 => sum_20
# sum_21 => sum_21
# sum_22 => sum_22
# sum_23 => sum_23
# sum_24 => sum_24
# sum_25 => sum_25
# sum_26 => sum_26
# sum_27 => sum_27
# sum_3 => sum_3
# sum_4 => sum_4
# sum_5 => sum_5
# sum_6 => sum_6
# sum_7 => sum_7
# sum_8 => sum_8
# sum_9 => sum_9
# tau_hi => sub_4
# tau_lo => sub_2
# tau_lo_1 => where
# tau_lo_10 => where_9
# tau_lo_11 => where_10
# tau_lo_12 => where_11
# tau_lo_13 => where_12
# tau_lo_14 => where_13
# tau_lo_15 => where_14
# tau_lo_16 => where_15
# tau_lo_17 => where_16
# tau_lo_18 => where_17
# tau_lo_19 => where_18
# tau_lo_2 => where_1
# tau_lo_20 => where_19
# tau_lo_21 => where_20
# tau_lo_22 => where_21
# tau_lo_23 => where_22
# tau_lo_24 => where_23
# tau_lo_25 => where_24
# tau_lo_3 => where_2
# tau_lo_4 => where_3
# tau_lo_5 => where_4
# tau_lo_6 => where_5
# tau_lo_7 => where_6
# tau_lo_8 => where_7
# tau_lo_9 => where_8
# tau_m => add
# tau_m_1 => add_1
# tau_m_10 => add_10
# tau_m_11 => add_11
# tau_m_12 => add_12
# tau_m_13 => add_13
# tau_m_14 => add_14
# tau_m_15 => add_15
# tau_m_16 => add_16
# tau_m_17 => add_17
# tau_m_18 => add_18
# tau_m_19 => add_19
# tau_m_2 => add_2
# tau_m_20 => add_20
# tau_m_21 => add_21
# tau_m_22 => add_22
# tau_m_23 => add_23
# tau_m_24 => add_24
# tau_m_25 => add_25
# tau_m_3 => add_3
# tau_m_4 => add_4
# tau_m_5 => add_5
# tau_m_6 => add_6
# tau_m_7 => add_7
# tau_m_8 => add_8
# tau_m_9 => add_9
# truediv => full_default_3
# truediv_1 => full_default_4
# truediv_10 => full_default_13
# truediv_11 => full_default_14
# truediv_12 => full_default_15
# truediv_13 => full_default_16
# truediv_14 => full_default_17
# truediv_15 => full_default_18
# truediv_16 => full_default_19
# truediv_17 => full_default_20
# truediv_18 => full_default_21
# truediv_19 => full_default_22
# truediv_2 => full_default_5
# truediv_20 => full_default_23
# truediv_21 => full_default_24
# truediv_22 => full_default_25
# truediv_23 => full_default_26
# truediv_24 => full_default_27
# truediv_25 => full_default_28
# truediv_26 => full_default_29
# truediv_3 => full_default_6
# truediv_4 => full_default_7
# truediv_5 => full_default_8
# truediv_6 => full_default_9
# truediv_7 => full_default_10
# truediv_8 => full_default_11
# truediv_9 => full_default_12
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%mul, -1, True), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %full_default_2), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_2 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (%getitem, %full_default_1), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_4, %sub_2), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_8, 2), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, %div), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0), kwargs = {})
# %full_default_4 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_1, %full_default_4), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [-1]), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_2), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0), kwargs = {})
# %full_default_3 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min, %full_default_3), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1]), kwargs = {})
# %sub_7 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze, %add, %sub_2), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div, 2), kwargs = {})
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, %div_1), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_1), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_12, 0), kwargs = {})
# %full_default_5 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_2, %full_default_5), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [-1]), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, 1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %sub_7), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_1, %add_1, %where), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_1, 2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %div_2), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_2), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0), kwargs = {})
# %full_default_6 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_3, %full_default_6), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_6, [-1]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_4, 1), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %sub_7), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_2, %add_2, %where_1), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_2, 2), kwargs = {})
# %add_3 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_2, %div_3), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_3), kwargs = {})
# %clamp_min_4 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_18, 0), kwargs = {})
# %full_default_7 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_4, %full_default_7), kwargs = {})
# %sum_5 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_7, [-1]), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, 1), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %sub_7), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_3, %add_3, %where_2), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_3, 2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_3, %div_4), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_4), kwargs = {})
# %clamp_min_5 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0), kwargs = {})
# %full_default_8 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_8 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_5, %full_default_8), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_8, [-1]), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_6, 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_23, %sub_7), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_4, %add_4, %where_3), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_4, 2), kwargs = {})
# %add_5 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_4, %div_5), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_5), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_24, 0), kwargs = {})
# %full_default_9 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_9 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_6, %full_default_9), kwargs = {})
# %sum_7 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_9, [-1]), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, 1), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_26, %sub_7), kwargs = {})
# %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_5, %add_5, %where_4), kwargs = {})
# %div_6 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_5, 2), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_5, %div_6), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_6), kwargs = {})
# %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_27, 0), kwargs = {})
# %full_default_10 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_10 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_7, %full_default_10), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_10, [-1]), kwargs = {})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_8, 1), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_29, %sub_7), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_6, %add_6, %where_5), kwargs = {})
# %div_7 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_6, 2), kwargs = {})
# %add_7 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_6, %div_7), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_7), kwargs = {})
# %clamp_min_8 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0), kwargs = {})
# %full_default_11 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_11 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_8, %full_default_11), kwargs = {})
# %sum_9 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_11, [-1]), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, 1), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %sub_7), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_7, %add_7, %where_6), kwargs = {})
# %div_8 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_7, 2), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_7, %div_8), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_8), kwargs = {})
# %clamp_min_9 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_33, 0), kwargs = {})
# %full_default_12 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_12 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_9, %full_default_12), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_12, [-1]), kwargs = {})
# %sub_35 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_10, 1), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_35, %sub_7), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_8, %add_8, %where_7), kwargs = {})
# %div_9 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_8, 2), kwargs = {})
# %add_9 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_8, %div_9), kwargs = {})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_9), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_36, 0), kwargs = {})
# %full_default_13 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_13 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_10, %full_default_13), kwargs = {})
# %sum_11 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_13, [-1]), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, 1), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_38, %sub_7), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_9, %add_9, %where_8), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_9, 2), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_9, %div_10), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_10), kwargs = {})
# %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_39, 0), kwargs = {})
# %full_default_14 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_14 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_11, %full_default_14), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_14, [-1]), kwargs = {})
# %sub_41 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_12, 1), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_41, %sub_7), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_10, %add_10, %where_9), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_10, 2), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_10, %div_11), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_11), kwargs = {})
# %clamp_min_12 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_42, 0), kwargs = {})
# %full_default_15 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_15 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_12, %full_default_15), kwargs = {})
# %sum_13 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_15, [-1]), kwargs = {})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, 1), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_44, %sub_7), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_11, %add_11, %where_10), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_11, 2), kwargs = {})
# %add_12 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_11, %div_12), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_12), kwargs = {})
# %clamp_min_13 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_45, 0), kwargs = {})
# %full_default_16 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_16 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_13, %full_default_16), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_16, [-1]), kwargs = {})
# %sub_47 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_14, 1), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_47, %sub_7), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_12, %add_12, %where_11), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_12, 2), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_12, %div_13), kwargs = {})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_13), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_48, 0), kwargs = {})
# %full_default_17 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_17 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_14, %full_default_17), kwargs = {})
# %sum_15 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_17, [-1]), kwargs = {})
# %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, 1), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_50, %sub_7), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_13, %add_13, %where_12), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_13, 2), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_13, %div_14), kwargs = {})
# %sub_51 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_14), kwargs = {})
# %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_51, 0), kwargs = {})
# %full_default_18 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_18 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_15, %full_default_18), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_18, [-1]), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_14, %add_14, %where_13), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %add_15 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_14, %div_15), kwargs = {})
# %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_15), kwargs = {})
# %clamp_min_16 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_54, 0), kwargs = {})
# %full_default_19 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_19 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_16, %full_default_19), kwargs = {})
# %sum_17 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_19, [-1]), kwargs = {})
# %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_17, 1), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_15, %add_15, %where_14), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %add_16 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_15, %div_16), kwargs = {})
# %sub_57 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_16), kwargs = {})
# %clamp_min_17 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_57, 0), kwargs = {})
# %full_default_20 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_20 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_17, %full_default_20), kwargs = {})
# %sum_18 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_20, [-1]), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_16, %add_16, %where_15), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %add_17 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_16, %div_17), kwargs = {})
# %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_17), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_60, 0), kwargs = {})
# %full_default_21 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_21 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_18, %full_default_21), kwargs = {})
# %sum_19 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_21, [-1]), kwargs = {})
# %where_17 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_17, %add_17, %where_16), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_17, %div_18), kwargs = {})
# %sub_63 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_18), kwargs = {})
# %clamp_min_19 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_63, 0), kwargs = {})
# %full_default_22 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_22 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_19, %full_default_22), kwargs = {})
# %sum_20 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_22, [-1]), kwargs = {})
# %where_18 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_18, %add_18, %where_17), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %add_19 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_18, %div_19), kwargs = {})
# %sub_66 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_19), kwargs = {})
# %clamp_min_20 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_66, 0), kwargs = {})
# %full_default_23 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_23 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_20, %full_default_23), kwargs = {})
# %sum_21 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_23, [-1]), kwargs = {})
# %where_19 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_19, %add_19, %where_18), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_19, %div_20), kwargs = {})
# %sub_69 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_20), kwargs = {})
# %clamp_min_21 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_69, 0), kwargs = {})
# %full_default_24 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_24 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_21, %full_default_24), kwargs = {})
# %sum_22 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_24, [-1]), kwargs = {})
# %where_20 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_20, %add_20, %where_19), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %add_21 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_20, %div_21), kwargs = {})
# %sub_72 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_21), kwargs = {})
# %clamp_min_22 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_72, 0), kwargs = {})
# %full_default_25 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_25 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_22, %full_default_25), kwargs = {})
# %sum_23 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_25, [-1]), kwargs = {})
# %where_21 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_21, %add_21, %where_20), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %add_22 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_21, %div_22), kwargs = {})
# %sub_75 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_22), kwargs = {})
# %clamp_min_23 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_75, 0), kwargs = {})
# %full_default_26 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_26 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_23, %full_default_26), kwargs = {})
# %sum_24 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_26, [-1]), kwargs = {})
# %where_22 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_22, %add_22, %where_21), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %add_23 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_22, %div_23), kwargs = {})
# %sub_78 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_23), kwargs = {})
# %clamp_min_24 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_78, 0), kwargs = {})
# %full_default_27 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_27 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_24, %full_default_27), kwargs = {})
# %sum_25 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_27, [-1]), kwargs = {})
# %where_23 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_23, %add_23, %where_22), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %add_24 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_23, %div_24), kwargs = {})
# %sub_81 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_24), kwargs = {})
# %clamp_min_25 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_81, 0), kwargs = {})
# %full_default_28 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_28 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_25, %full_default_28), kwargs = {})
# %sum_26 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_28, [-1]), kwargs = {})
# %where_24 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_24, %add_24, %where_23), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {})
# %sub_84 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_25), kwargs = {})
# %clamp_min_26 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_84, 0), kwargs = {})
# %full_default_29 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_29 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_26, %full_default_29), kwargs = {})
# %sum_27 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_29, [-1]), kwargs = {})
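# Reading the fragments above together: they unroll roughly 25 iterations of a
# bisection search for a threshold tau, with the interval dm halved each step,
# p = clamp(X - tau, 0)^2 evaluated at the midpoint, and `where` keeping the
# midpoint as the new lower bound when sum(p) - 1 matches the sign at the
# current lower bound.  The constants (X = 0.5 * input, the exponent 2.0, an
# initial bracket of width 0.5 over a size-4 dimension) are consistent with an
# entmax-style mapping with alpha = 1.5, though that is an inference from the
# graph, not something recorded in it.  The kernel below fuses the entire
# unrolled search into a single pointwise program over the 64 rows.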
triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0 = async_compile.triton('triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0', 'mutated_arg_names': ['in_out_ptr12'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr12, in_ptr0, out_ptr0, out_ptr19, out_ptr27, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
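    # Each program instance handles one length-4 row of the last dimension;
    # tmp0/tmp3/tmp6/tmp9 are its four elements, scaled to X = 0.5 * input
    # just below.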
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 * tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 * tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp14 = tmp11 - tmp1
tmp15 = tmp14 - tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp13 + tmp16
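    # tmp11 is the row maximum of X; tmp13 = max(X) - 1 and tmp14 = max(X) - 0.5
    # bracket the threshold, tmp16 is half the bracket width (dm), and
    # tmp17 = tau_lo + dm is the first midpoint tau_m.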
tmp18 = tmp2 - tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = 2.0
tmp22 = libdevice.pow(tmp20, tmp21)
tmp23 = tmp4 - tmp17
tmp24 = triton_helpers.maximum(tmp23, tmp19)
tmp25 = libdevice.pow(tmp24, tmp21)
tmp26 = tmp22 + tmp25
tmp27 = tmp7 - tmp17
tmp28 = triton_helpers.maximum(tmp27, tmp19)
tmp29 = libdevice.pow(tmp28, tmp21)
tmp30 = tmp26 + tmp29
tmp31 = tmp10 - tmp17
tmp32 = triton_helpers.maximum(tmp31, tmp19)
tmp33 = libdevice.pow(tmp32, tmp21)
tmp34 = tmp30 + tmp33
tmp35 = tmp2 - tmp13
tmp36 = triton_helpers.maximum(tmp35, tmp19)
tmp37 = libdevice.pow(tmp36, tmp21)
tmp38 = tmp4 - tmp13
tmp39 = triton_helpers.maximum(tmp38, tmp19)
tmp40 = libdevice.pow(tmp39, tmp21)
tmp41 = tmp37 + tmp40
tmp42 = tmp7 - tmp13
tmp43 = triton_helpers.maximum(tmp42, tmp19)
tmp44 = libdevice.pow(tmp43, tmp21)
tmp45 = tmp41 + tmp44
tmp46 = tmp10 - tmp13
tmp47 = triton_helpers.maximum(tmp46, tmp19)
tmp48 = libdevice.pow(tmp47, tmp21)
tmp49 = tmp45 + tmp48
tmp50 = tmp34 - tmp12
tmp51 = tmp49 - tmp12
tmp52 = tmp50 * tmp51
tmp53 = tmp52 >= tmp19
tmp54 = tl.where(tmp53, tmp17, tmp13)
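    # First bisection update: tmp50 = f(tau_m) and tmp51 = f(tau_lo), each a
    # sum of clamped squares minus 1; where their product is non-negative the
    # midpoint tmp17 becomes the new lower bound.  The blocks that follow
    # repeat this update with dm halved each time (tmp55, tmp76, ...).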
tmp55 = tmp16 * tmp1
tmp56 = tmp54 + tmp55
tmp57 = tmp2 - tmp56
tmp58 = triton_helpers.maximum(tmp57, tmp19)
tmp59 = libdevice.pow(tmp58, tmp21)
tmp60 = tmp4 - tmp56
tmp61 = triton_helpers.maximum(tmp60, tmp19)
tmp62 = libdevice.pow(tmp61, tmp21)
tmp63 = tmp59 + tmp62
tmp64 = tmp7 - tmp56
tmp65 = triton_helpers.maximum(tmp64, tmp19)
tmp66 = libdevice.pow(tmp65, tmp21)
tmp67 = tmp63 + tmp66
tmp68 = tmp10 - tmp56
tmp69 = triton_helpers.maximum(tmp68, tmp19)
tmp70 = libdevice.pow(tmp69, tmp21)
tmp71 = tmp67 + tmp70
tmp72 = tmp71 - tmp12
tmp73 = tmp72 * tmp51
tmp74 = tmp73 >= tmp19
tmp75 = tl.where(tmp74, tmp56, tmp54)
tmp76 = tmp55 * tmp1
tmp77 = tmp75 + tmp76
tmp78 = tmp2 - tmp77
tmp79 = triton_helpers.maximum(tmp78, tmp19)
tmp80 = libdevice.pow(tmp79, tmp21)
tmp81 = tmp4 - tmp77
tmp82 = triton_helpers.maximum(tmp81, tmp19)
tmp83 = libdevice.pow(tmp82, tmp21)
tmp84 = tmp80 + tmp83
tmp85 = tmp7 - tmp77
tmp86 = triton_helpers.maximum(tmp85, tmp19)
tmp87 = libdevice.pow(tmp86, tmp21)
tmp88 = tmp84 + tmp87
tmp89 = tmp10 - tmp77
tmp90 = triton_helpers.maximum(tmp89, tmp19)
tmp91 = libdevice.pow(tmp90, tmp21)
tmp92 = tmp88 + tmp91
tmp93 = tmp92 - tmp12
tmp94 = tmp93 * tmp51
tmp95 = tmp94 >= tmp19
tmp96 = tl.where(tmp95, tmp77, tmp75)
tmp97 = tmp76 * tmp1
tmp98 = tmp96 + tmp97
tmp99 = tmp2 - tmp98
tmp100 = triton_helpers.maximum(tmp99, tmp19)
tmp101 = libdevice.pow(tmp100, tmp21)
tmp102 = tmp4 - tmp98
tmp103 = triton_helpers.maximum(tmp102, tmp19)
tmp104 = libdevice.pow(tmp103, tmp21)
tmp105 = tmp101 + tmp104
tmp106 = tmp7 - tmp98
tmp107 = triton_helpers.maximum(tmp106, tmp19)
tmp108 = libdevice.pow(tmp107, tmp21)
tmp109 = tmp105 + tmp108
tmp110 = tmp10 - tmp98
tmp111 = triton_helpers.maximum(tmp110, tmp19)
tmp112 = libdevice.pow(tmp111, tmp21)
tmp113 = tmp109 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp51
tmp116 = tmp115 >= tmp19
tmp117 = tl.where(tmp116, tmp98, tmp96)
tmp118 = tmp97 * tmp1
tmp119 = tmp117 + tmp118
tmp120 = tmp2 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp19)
tmp122 = libdevice.pow(tmp121, tmp21)
tmp123 = tmp4 - tmp119
tmp124 = triton_helpers.maximum(tmp123, tmp19)
tmp125 = libdevice.pow(tmp124, tmp21)
tmp126 = tmp122 + tmp125
tmp127 = tmp7 - tmp119
tmp128 = triton_helpers.maximum(tmp127, tmp19)
tmp129 = libdevice.pow(tmp128, tmp21)
tmp130 = tmp126 + tmp129
tmp131 = tmp10 - tmp119
tmp132 = triton_helpers.maximum(tmp131, tmp19)
tmp133 = libdevice.pow(tmp132, tmp21)
tmp134 = tmp130 + tmp133
tmp135 = tmp134 - tmp12
tmp136 = tmp135 * tmp51
tmp137 = tmp136 >= tmp19
tmp138 = tl.where(tmp137, tmp119, tmp117)
tmp139 = tmp118 * tmp1
tmp140 = tmp138 + tmp139
tmp141 = tmp2 - tmp140
tmp142 = triton_helpers.maximum(tmp141, tmp19)
tmp143 = libdevice.pow(tmp142, tmp21)
tmp144 = tmp4 - tmp140
tmp145 = triton_helpers.maximum(tmp144, tmp19)
tmp146 = libdevice.pow(tmp145, tmp21)
tmp147 = tmp143 + tmp146
tmp148 = tmp7 - tmp140
tmp149 = triton_helpers.maximum(tmp148, tmp19)
tmp150 = libdevice.pow(tmp149, tmp21)
tmp151 = tmp147 + tmp150
tmp152 = tmp10 - tmp140
tmp153 = triton_helpers.maximum(tmp152, tmp19)
tmp154 = libdevice.pow(tmp153, tmp21)
tmp155 = tmp151 + tmp154
tmp156 = tmp155 - tmp12
tmp157 = tmp156 * tmp51
tmp158 = tmp157 >= tmp19
tmp159 = tl.where(tmp158, tmp140, tmp138)
tmp160 = tmp139 * tmp1
tmp161 = tmp159 + tmp160
tmp162 = tmp2 - tmp161
tmp163 = triton_helpers.maximum(tmp162, tmp19)
tmp164 = libdevice.pow(tmp163, tmp21)
tmp165 = tmp4 - tmp161
tmp166 = triton_helpers.maximum(tmp165, tmp19)
tmp167 = libdevice.pow(tmp166, tmp21)
tmp168 = tmp164 + tmp167
tmp169 = tmp7 - tmp161
tmp170 = triton_helpers.maximum(tmp169, tmp19)
tmp171 = libdevice.pow(tmp170, tmp21)
tmp172 = tmp168 + tmp171
tmp173 = tmp10 - tmp161
tmp174 = triton_helpers.maximum(tmp173, tmp19)
tmp175 = libdevice.pow(tmp174, tmp21)
tmp176 = tmp172 + tmp175
tmp177 = tmp176 - tmp12
tmp178 = tmp177 * tmp51
tmp179 = tmp178 >= tmp19
tmp180 = tl.where(tmp179, tmp161, tmp159)
tmp181 = tmp160 * tmp1
tmp182 = tmp180 + tmp181
tmp183 = tmp2 - tmp182
tmp184 = triton_helpers.maximum(tmp183, tmp19)
tmp185 = libdevice.pow(tmp184, tmp21)
tmp186 = tmp4 - tmp182
tmp187 = triton_helpers.maximum(tmp186, tmp19)
tmp188 = libdevice.pow(tmp187, tmp21)
tmp189 = tmp185 + tmp188
tmp190 = tmp7 - tmp182
tmp191 = triton_helpers.maximum(tmp190, tmp19)
tmp192 = libdevice.pow(tmp191, tmp21)
tmp193 = tmp189 + tmp192
tmp194 = tmp10 - tmp182
tmp195 = triton_helpers.maximum(tmp194, tmp19)
tmp196 = libdevice.pow(tmp195, tmp21)
tmp197 = tmp193 + tmp196
tmp198 = tmp197 - tmp12
tmp199 = tmp198 * tmp51
tmp200 = tmp199 >= tmp19
tmp201 = tl.where(tmp200, tmp182, tmp180)
tmp202 = tmp181 * tmp1
tmp203 = tmp201 + tmp202
tmp204 = tmp2 - tmp203
tmp205 = triton_helpers.maximum(tmp204, tmp19)
tmp206 = libdevice.pow(tmp205, tmp21)
tmp207 = tmp4 - tmp203
tmp208 = triton_helpers.maximum(tmp207, tmp19)
tmp209 = libdevice.pow(tmp208, tmp21)
tmp210 = tmp206 + tmp209
tmp211 = tmp7 - tmp203
tmp212 = triton_helpers.maximum(tmp211, tmp19)
tmp213 = libdevice.pow(tmp212, tmp21)
tmp214 = tmp210 + tmp213
tmp215 = tmp10 - tmp203
tmp216 = triton_helpers.maximum(tmp215, tmp19)
tmp217 = libdevice.pow(tmp216, tmp21)
tmp218 = tmp214 + tmp217
tmp219 = tmp218 - tmp12
tmp220 = tmp219 * tmp51
tmp221 = tmp220 >= tmp19
tmp222 = tl.where(tmp221, tmp203, tmp201)
tmp223 = tmp202 * tmp1
tmp224 = tmp222 + tmp223
tmp225 = tmp2 - tmp224
tmp226 = triton_helpers.maximum(tmp225, tmp19)
tmp227 = libdevice.pow(tmp226, tmp21)
tmp228 = tmp4 - tmp224
tmp229 = triton_helpers.maximum(tmp228, tmp19)
tmp230 = libdevice.pow(tmp229, tmp21)
tmp231 = tmp227 + tmp230
tmp232 = tmp7 - tmp224
tmp233 = triton_helpers.maximum(tmp232, tmp19)
tmp234 = libdevice.pow(tmp233, tmp21)
tmp235 = tmp231 + tmp234
tmp236 = tmp10 - tmp224
tmp237 = triton_helpers.maximum(tmp236, tmp19)
tmp238 = libdevice.pow(tmp237, tmp21)
tmp239 = tmp235 + tmp238
tmp240 = tmp239 - tmp12
tmp241 = tmp240 * tmp51
tmp242 = tmp241 >= tmp19
tmp243 = tl.where(tmp242, tmp224, tmp222)
tmp244 = tmp223 * tmp1
tmp245 = tmp243 + tmp244
tmp246 = tmp2 - tmp245
tmp247 = triton_helpers.maximum(tmp246, tmp19)
tmp248 = libdevice.pow(tmp247, tmp21)
tmp249 = tmp4 - tmp245
tmp250 = triton_helpers.maximum(tmp249, tmp19)
tmp251 = libdevice.pow(tmp250, tmp21)
tmp252 = tmp248 + tmp251
tmp253 = tmp7 - tmp245
tmp254 = triton_helpers.maximum(tmp253, tmp19)
tmp255 = libdevice.pow(tmp254, tmp21)
tmp256 = tmp252 + tmp255
tmp257 = tmp10 - tmp245
tmp258 = triton_helpers.maximum(tmp257, tmp19)
tmp259 = libdevice.pow(tmp258, tmp21)
tmp260 = tmp256 + tmp259
tmp261 = tmp260 - tmp12
tmp262 = tmp261 * tmp51
tmp263 = tmp262 >= tmp19
tmp264 = tl.where(tmp263, tmp245, tmp243)
tmp265 = tmp244 * tmp1
tmp266 = tmp264 + tmp265
tmp267 = tmp2 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp19)
tmp269 = libdevice.pow(tmp268, tmp21)
tmp270 = tmp4 - tmp266
tmp271 = triton_helpers.maximum(tmp270, tmp19)
tmp272 = libdevice.pow(tmp271, tmp21)
tmp273 = tmp269 + tmp272
tmp274 = tmp7 - tmp266
tmp275 = triton_helpers.maximum(tmp274, tmp19)
tmp276 = libdevice.pow(tmp275, tmp21)
tmp277 = tmp273 + tmp276
tmp278 = tmp10 - tmp266
tmp279 = triton_helpers.maximum(tmp278, tmp19)
tmp280 = libdevice.pow(tmp279, tmp21)
tmp281 = tmp277 + tmp280
tmp282 = tmp281 - tmp12
tmp283 = tmp282 * tmp51
tmp284 = tmp283 >= tmp19
tmp285 = tl.where(tmp284, tmp266, tmp264)
tmp286 = tmp265 * tmp1
tmp287 = tmp285 + tmp286
tmp288 = tmp2 - tmp287
tmp289 = triton_helpers.maximum(tmp288, tmp19)
tmp290 = libdevice.pow(tmp289, tmp21)
tmp291 = tmp4 - tmp287
tmp292 = triton_helpers.maximum(tmp291, tmp19)
tmp293 = libdevice.pow(tmp292, tmp21)
tmp294 = tmp290 + tmp293
tmp295 = tmp7 - tmp287
tmp296 = triton_helpers.maximum(tmp295, tmp19)
tmp297 = libdevice.pow(tmp296, tmp21)
tmp298 = tmp294 + tmp297
tmp299 = tmp10 - tmp287
tmp300 = triton_helpers.maximum(tmp299, tmp19)
tmp301 = libdevice.pow(tmp300, tmp21)
tmp302 = tmp298 + tmp301
tmp303 = tmp302 - tmp12
tmp304 = tmp303 * tmp51
tmp305 = tmp304 >= tmp19
tmp306 = tl.where(tmp305, tmp287, tmp285)
tmp307 = tmp286 * tmp1
tmp308 = tmp307 * tmp1
tmp309 = tmp306 + tmp307
tmp310 = tmp2 - tmp309
tmp311 = triton_helpers.maximum(tmp310, tmp19)
tmp312 = libdevice.pow(tmp311, tmp21)
tmp313 = tmp4 - tmp309
tmp314 = triton_helpers.maximum(tmp313, tmp19)
tmp315 = libdevice.pow(tmp314, tmp21)
tmp316 = tmp312 + tmp315
tmp317 = tmp7 - tmp309
tmp318 = triton_helpers.maximum(tmp317, tmp19)
tmp319 = libdevice.pow(tmp318, tmp21)
tmp320 = tmp316 + tmp319
tmp321 = tmp10 - tmp309
tmp322 = triton_helpers.maximum(tmp321, tmp19)
tmp323 = libdevice.pow(tmp322, tmp21)
tmp324 = tmp320 + tmp323
tmp325 = tmp324 - tmp12
tmp326 = tmp325 * tmp51
tmp327 = tmp326 >= tmp19
tmp328 = tl.where(tmp327, tmp309, tmp306)
tmp329 = tmp328 + tmp308
tmp330 = tmp2 - tmp329
tmp331 = triton_helpers.maximum(tmp330, tmp19)
tmp332 = libdevice.pow(tmp331, tmp21)
tmp333 = tmp4 - tmp329
tmp334 = triton_helpers.maximum(tmp333, tmp19)
tmp335 = libdevice.pow(tmp334, tmp21)
tmp336 = tmp332 + tmp335
tmp337 = tmp7 - tmp329
tmp338 = triton_helpers.maximum(tmp337, tmp19)
tmp339 = libdevice.pow(tmp338, tmp21)
tmp340 = tmp336 + tmp339
tmp341 = tmp10 - tmp329
tmp342 = triton_helpers.maximum(tmp341, tmp19)
tmp343 = libdevice.pow(tmp342, tmp21)
tmp344 = tmp340 + tmp343
tmp345 = tmp344 - tmp12
tmp346 = tmp345 * tmp51
tmp347 = tmp346 >= tmp19
tmp348 = tl.where(tmp347, tmp329, tmp328)
tmp349 = tmp308 * tmp1
tmp350 = tmp348 + tmp349
tmp351 = tmp2 - tmp350
tmp352 = triton_helpers.maximum(tmp351, tmp19)
tmp353 = libdevice.pow(tmp352, tmp21)
tmp354 = tmp4 - tmp350
tmp355 = triton_helpers.maximum(tmp354, tmp19)
tmp356 = libdevice.pow(tmp355, tmp21)
tmp357 = tmp353 + tmp356
tmp358 = tmp7 - tmp350
tmp359 = triton_helpers.maximum(tmp358, tmp19)
tmp360 = libdevice.pow(tmp359, tmp21)
tmp361 = tmp357 + tmp360
tmp362 = tmp10 - tmp350
tmp363 = triton_helpers.maximum(tmp362, tmp19)
tmp364 = libdevice.pow(tmp363, tmp21)
tmp365 = tmp361 + tmp364
tmp366 = tmp365 - tmp12
tmp367 = tmp366 * tmp51
tmp368 = tmp367 >= tmp19
tmp369 = tl.where(tmp368, tmp350, tmp348)
tmp370 = tmp349 * tmp1
tmp371 = tmp369 + tmp370
tmp372 = tmp2 - tmp371
tmp373 = triton_helpers.maximum(tmp372, tmp19)
tmp374 = libdevice.pow(tmp373, tmp21)
tmp375 = tmp4 - tmp371
tmp376 = triton_helpers.maximum(tmp375, tmp19)
tmp377 = libdevice.pow(tmp376, tmp21)
tmp378 = tmp374 + tmp377
tmp379 = tmp7 - tmp371
tmp380 = triton_helpers.maximum(tmp379, tmp19)
tmp381 = libdevice.pow(tmp380, tmp21)
tmp382 = tmp378 + tmp381
tmp383 = tmp10 - tmp371
tmp384 = triton_helpers.maximum(tmp383, tmp19)
tmp385 = libdevice.pow(tmp384, tmp21)
tmp386 = tmp382 + tmp385
tmp387 = tmp386 - tmp12
tmp388 = tmp387 * tmp51
tmp389 = tmp388 >= tmp19
tmp390 = tl.where(tmp389, tmp371, tmp369)
tmp391 = tmp370 * tmp1
tmp392 = tmp390 + tmp391
tmp393 = tmp2 - tmp392
tmp394 = triton_helpers.maximum(tmp393, tmp19)
tmp395 = libdevice.pow(tmp394, tmp21)
tmp396 = tmp4 - tmp392
tmp397 = triton_helpers.maximum(tmp396, tmp19)
tmp398 = libdevice.pow(tmp397, tmp21)
tmp399 = tmp395 + tmp398
tmp400 = tmp7 - tmp392
tmp401 = triton_helpers.maximum(tmp400, tmp19)
tmp402 = libdevice.pow(tmp401, tmp21)
tmp403 = tmp399 + tmp402
tmp404 = tmp10 - tmp392
tmp405 = triton_helpers.maximum(tmp404, tmp19)
tmp406 = libdevice.pow(tmp405, tmp21)
tmp407 = tmp403 + tmp406
tmp408 = tmp407 - tmp12
tmp409 = tmp408 * tmp51
tmp410 = tmp409 >= tmp19
tmp411 = tl.where(tmp410, tmp392, tmp390)
tmp412 = tmp391 * tmp1
tmp413 = tmp411 + tmp412
tmp414 = tmp2 - tmp413
tmp415 = triton_helpers.maximum(tmp414, tmp19)
tmp416 = libdevice.pow(tmp415, tmp21)
tmp417 = tmp4 - tmp413
tmp418 = triton_helpers.maximum(tmp417, tmp19)
tmp419 = libdevice.pow(tmp418, tmp21)
tmp420 = tmp416 + tmp419
tmp421 = tmp7 - tmp413
tmp422 = triton_helpers.maximum(tmp421, tmp19)
tmp423 = libdevice.pow(tmp422, tmp21)
tmp424 = tmp420 + tmp423
tmp425 = tmp10 - tmp413
tmp426 = triton_helpers.maximum(tmp425, tmp19)
tmp427 = libdevice.pow(tmp426, tmp21)
tmp428 = tmp424 + tmp427
tmp429 = tmp428 - tmp12
tmp430 = tmp429 * tmp51
tmp431 = tmp430 >= tmp19
tmp432 = tl.where(tmp431, tmp413, tmp411)
tmp433 = tmp412 * tmp1
tmp434 = tmp432 + tmp433
tmp435 = tmp2 - tmp434
tmp436 = triton_helpers.maximum(tmp435, tmp19)
tmp437 = libdevice.pow(tmp436, tmp21)
tmp438 = tmp4 - tmp434
tmp439 = triton_helpers.maximum(tmp438, tmp19)
tmp440 = libdevice.pow(tmp439, tmp21)
tmp441 = tmp437 + tmp440
tmp442 = tmp7 - tmp434
tmp443 = triton_helpers.maximum(tmp442, tmp19)
tmp444 = libdevice.pow(tmp443, tmp21)
tmp445 = tmp441 + tmp444
tmp446 = tmp10 - tmp434
tmp447 = triton_helpers.maximum(tmp446, tmp19)
tmp448 = libdevice.pow(tmp447, tmp21)
tmp449 = tmp445 + tmp448
tmp450 = tmp449 - tmp12
tmp451 = tmp450 * tmp51
tmp452 = tmp451 >= tmp19
tmp453 = tl.where(tmp452, tmp434, tmp432)
tmp454 = tmp433 * tmp1
tmp455 = tmp453 + tmp454
tmp456 = tmp2 - tmp455
tmp457 = triton_helpers.maximum(tmp456, tmp19)
tmp458 = libdevice.pow(tmp457, tmp21)
tmp459 = tmp4 - tmp455
tmp460 = triton_helpers.maximum(tmp459, tmp19)
tmp461 = libdevice.pow(tmp460, tmp21)
tmp462 = tmp458 + tmp461
tmp463 = tmp7 - tmp455
tmp464 = triton_helpers.maximum(tmp463, tmp19)
tmp465 = libdevice.pow(tmp464, tmp21)
tmp466 = tmp462 + tmp465
tmp467 = tmp10 - tmp455
tmp468 = triton_helpers.maximum(tmp467, tmp19)
tmp469 = libdevice.pow(tmp468, tmp21)
tmp470 = tmp466 + tmp469
tmp471 = tmp470 - tmp12
tmp472 = tmp471 * tmp51
tmp473 = tmp472 >= tmp19
tmp474 = tl.where(tmp473, tmp455, tmp453)
tmp475 = tmp454 * tmp1
tmp476 = tmp474 + tmp475
tmp477 = tmp2 - tmp476
tmp478 = triton_helpers.maximum(tmp477, tmp19)
tmp479 = libdevice.pow(tmp478, tmp21)
tmp480 = tmp4 - tmp476
tmp481 = triton_helpers.maximum(tmp480, tmp19)
tmp482 = libdevice.pow(tmp481, tmp21)
tmp483 = tmp479 + tmp482
tmp484 = tmp7 - tmp476
tmp485 = triton_helpers.maximum(tmp484, tmp19)
tmp486 = libdevice.pow(tmp485, tmp21)
tmp487 = tmp483 + tmp486
tmp488 = tmp10 - tmp476
tmp489 = triton_helpers.maximum(tmp488, tmp19)
tmp490 = libdevice.pow(tmp489, tmp21)
tmp491 = tmp487 + tmp490
tmp492 = tmp491 - tmp12
tmp493 = tmp492 * tmp51
tmp494 = tmp493 >= tmp19
tmp495 = tl.where(tmp494, tmp476, tmp474)
tmp496 = tmp475 * tmp1
tmp497 = tmp495 + tmp496
tmp498 = tmp2 - tmp497
tmp499 = triton_helpers.maximum(tmp498, tmp19)
tmp500 = libdevice.pow(tmp499, tmp21)
tmp501 = tmp4 - tmp497
tmp502 = triton_helpers.maximum(tmp501, tmp19)
tmp503 = libdevice.pow(tmp502, tmp21)
tmp504 = tmp500 + tmp503
tmp505 = tmp7 - tmp497
tmp506 = triton_helpers.maximum(tmp505, tmp19)
tmp507 = libdevice.pow(tmp506, tmp21)
tmp508 = tmp504 + tmp507
tmp509 = tmp10 - tmp497
tmp510 = triton_helpers.maximum(tmp509, tmp19)
tmp511 = libdevice.pow(tmp510, tmp21)
tmp512 = tmp508 + tmp511
tmp513 = tmp512 - tmp12
tmp514 = tmp513 * tmp51
tmp515 = tmp514 >= tmp19
tmp516 = tl.where(tmp515, tmp497, tmp495)
tmp517 = tmp496 * tmp1
tmp518 = tmp516 + tmp517
tmp519 = tmp2 - tmp518
tmp520 = triton_helpers.maximum(tmp519, tmp19)
tmp521 = libdevice.pow(tmp520, tmp21)
tmp522 = tmp4 - tmp518
tmp523 = triton_helpers.maximum(tmp522, tmp19)
tmp524 = libdevice.pow(tmp523, tmp21)
tmp525 = tmp521 + tmp524
tmp526 = tmp7 - tmp518
tmp527 = triton_helpers.maximum(tmp526, tmp19)
tmp528 = libdevice.pow(tmp527, tmp21)
tmp529 = tmp525 + tmp528
tmp530 = tmp10 - tmp518
tmp531 = triton_helpers.maximum(tmp530, tmp19)
tmp532 = libdevice.pow(tmp531, tmp21)
tmp533 = tmp529 + tmp532
tmp534 = tmp533 - tmp12
tmp535 = tmp534 * tmp51
tmp536 = tmp535 >= tmp19
tmp537 = tl.where(tmp536, tmp518, tmp516)
tmp538 = tmp517 * tmp1
tmp539 = tmp537 + tmp538
tmp540 = tmp2 - tmp539
tmp541 = triton_helpers.maximum(tmp540, tmp19)
tmp542 = libdevice.pow(tmp541, tmp21)
tmp543 = tmp4 - tmp539
tmp544 = triton_helpers.maximum(tmp543, tmp19)
tmp545 = libdevice.pow(tmp544, tmp21)
tmp546 = tmp542 + tmp545
tmp547 = tmp7 - tmp539
tmp548 = triton_helpers.maximum(tmp547, tmp19)
tmp549 = libdevice.pow(tmp548, tmp21)
tmp550 = tmp546 + tmp549
tmp551 = tmp10 - tmp539
tmp552 = triton_helpers.maximum(tmp551, tmp19)
tmp553 = libdevice.pow(tmp552, tmp21)
tmp554 = tmp550 + tmp553
tmp555 = tmp554 - tmp12
tmp556 = tmp555 * tmp51
tmp557 = tmp556 >= tmp19
tmp558 = tl.where(tmp557, tmp539, tmp537)
tmp559 = tmp538 * tmp1
tmp560 = tmp558 + tmp559
tmp561 = tmp2 - tmp560
tmp562 = triton_helpers.maximum(tmp561, tmp19)
tmp563 = libdevice.pow(tmp562, tmp21)
tmp564 = tmp4 - tmp560
tmp565 = triton_helpers.maximum(tmp564, tmp19)
tmp566 = libdevice.pow(tmp565, tmp21)
tmp567 = tmp563 + tmp566
tmp568 = tmp7 - tmp560
tmp569 = triton_helpers.maximum(tmp568, tmp19)
tmp570 = libdevice.pow(tmp569, tmp21)
tmp571 = tmp567 + tmp570
tmp572 = tmp10 - tmp560
tmp573 = triton_helpers.maximum(tmp572, tmp19)
tmp574 = libdevice.pow(tmp573, tmp21)
tmp575 = tmp571 + tmp574
tl.store(out_ptr0 + (x0), tmp49, xmask)
tl.store(out_ptr19 + (x0), tmp308, xmask)
tl.store(in_out_ptr12 + (x0), tmp558, xmask)
tl.store(out_ptr27 + (x0), tmp575, xmask)
''', device_str='cuda')
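
# A minimal eager-mode sketch of the bisection step that the kernel above
# unrolls, under the entmax-style reading noted before it.  The names
# `bisect_step`, `X`, `tau_lo`, `dm`, and `f_lo` mirror the source-node
# comments and are illustrative only; they are not part of the generated
# code's interface, and nothing in this file calls the function.
def bisect_step(X, tau_lo, dm, f_lo):
    # X has shape [..., d]; tau_lo, dm, f_lo have shape [..., 1].
    # Halve the interval and probe its midpoint.
    dm = dm / 2
    tau_m = tau_lo + dm
    # f(tau) = sum_j clamp(X_j - tau, 0)^2 - 1; the search looks for its root.
    p_m = torch.clamp(X - tau_m, min=0) ** 2
    f_m = p_m.sum(dim=-1, keepdim=True) - 1
    # Keep the midpoint as the new lower bound wherever f(tau_m) has the same
    # sign as f at the current lower bound (f is non-increasing in tau, so the
    # root then lies above tau_m).
    tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    return tau_lo, dm
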
# kernel path: runs/run_shard_4/inductor_cache/wp/cwps7jwo24axevjav4czdrzol2hcmol74gjjohufcaso6g5zh2my.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, sub_87, clamp_27, truediv_27, p_m_26], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow]
# Source node to ATen node mapping:
# X => mul
# clamp_27 => clamp_min_27
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# p_m_26 => pow_30
# sub => full_default
# sub_87 => sub_87
# tau_lo_26 => where_25
# tau_m_25 => add_25
# tau_m_26 => add_26
# truediv_27 => full_default_30
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {})
# %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {})
# %sub_87 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_26), kwargs = {})
# %clamp_min_27 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_87, 0), kwargs = {})
# %full_default_30 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_30 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_27, %full_default_30), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_where_1 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_where_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_where_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp7 = tmp6 - tmp4
tmp8 = tmp5 * tmp7
tmp9 = 0.0
tmp10 = tmp8 >= tmp9
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp11 + tmp23
tmp25 = tl.where(tmp10, tmp24, tmp11)
tmp26 = tmp23 * tmp1
tmp27 = tmp25 + tmp26
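    # tmp13..tmp23 rebuild the shrinking step sizes dm_16..dm_26 by repeatedly
    # halving the value loaded from in_ptr4; tmp25 is tau_lo_26 after the
    # where_25 update, and tmp27 = tau_lo_26 + dm_27 is the midpoint tau_m_26
    # evaluated below.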
tmp28 = tmp2 - tmp27
tmp29 = triton_helpers.maximum(tmp28, tmp9)
tmp30 = 2.0
tmp31 = libdevice.pow(tmp29, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
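
# Note that the halving chain is recomputed in registers from a single load
# rather than materializing each intermediate dm tensor; the only output of
# this kernel is the elementwise p_m_26 = clamp(X - tau_m_26, 0)^2, which a
# later kernel reduces over the last dimension.
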
# kernel path: runs/run_shard_4/inductor_cache/h7/ch7plngbxligq5e4vnkulisynihcx6p3zjpiwu3vq4ul6nzoouq7.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, tau_lo_27, dm_28, tau_m_27, sub_90, clamp_28, truediv_28, p_m_27, sum_29], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
# Source node to ATen node mapping:
# X => mul
# clamp_28 => clamp_min_28
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# p_m_27 => pow_31
# sub => full_default
# sub_90 => sub_90
# sum_29 => sum_29
# tau_lo_26 => where_25
# tau_lo_27 => where_26
# tau_m_25 => add_25
# tau_m_26 => add_26
# tau_m_27 => add_27
# truediv_28 => full_default_31
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_24, %div_25), kwargs = {})
# %where_25 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_25, %add_25, %where_24), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %add_26 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_25, %div_26), kwargs = {})
# %where_26 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_26, %add_26, %where_25), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {})
# %sub_90 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_27), kwargs = {})
# %clamp_min_28 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_90, 0), kwargs = {})
# %full_default_31 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_31 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_28, %full_default_31), kwargs = {})
# %sum_29 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_31, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0), xmask)
tmp14 = tl.load(in_ptr2 + (x0), xmask)
tmp18 = tl.load(in_out_ptr0 + (x0), xmask)
tmp19 = tl.load(in_ptr3 + (x0), xmask)
tmp37 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last')
tmp45 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = tmp9 - tmp7
tmp11 = tmp8 * tmp10
tmp12 = 0.0
tmp13 = tmp11 >= tmp12
tmp15 = tmp14 - tmp7
tmp16 = tmp15 * tmp10
tmp17 = tmp16 >= tmp12
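    # Two pending sign tests: tmp17 (from sums computed by earlier kernels,
    # via in_ptr1/in_ptr2) gates the where_25 update, and tmp13 (from the
    # p_m_26 values just summed out of in_ptr0) gates the where_26 update
    # applied below.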
tmp20 = 0.5
tmp21 = tmp19 * tmp20
tmp22 = tmp21 * tmp20
tmp23 = tmp22 * tmp20
tmp24 = tmp23 * tmp20
tmp25 = tmp24 * tmp20
tmp26 = tmp25 * tmp20
tmp27 = tmp26 * tmp20
tmp28 = tmp27 * tmp20
tmp29 = tmp28 * tmp20
tmp30 = tmp29 * tmp20
tmp31 = tmp30 * tmp20
tmp32 = tmp18 + tmp31
tmp33 = tl.where(tmp17, tmp32, tmp18)
tmp34 = tmp31 * tmp20
tmp35 = tmp33 + tmp34
tmp36 = tl.where(tmp13, tmp35, tmp33)
tmp38 = tmp37 * tmp20
tmp39 = tmp34 * tmp20
tmp40 = tmp36 + tmp39
tmp41 = tmp38 - tmp40
tmp42 = triton_helpers.maximum(tmp41, tmp12)
tmp43 = 2.0
tmp44 = libdevice.pow(tmp42, tmp43)
tmp46 = tmp45 * tmp20
tmp47 = tmp46 - tmp40
tmp48 = triton_helpers.maximum(tmp47, tmp12)
tmp49 = libdevice.pow(tmp48, tmp43)
tmp50 = tmp44 + tmp49
tmp52 = tmp51 * tmp20
tmp53 = tmp52 - tmp40
tmp54 = triton_helpers.maximum(tmp53, tmp12)
tmp55 = libdevice.pow(tmp54, tmp43)
tmp56 = tmp50 + tmp55
tmp58 = tmp57 * tmp20
tmp59 = tmp58 - tmp40
tmp60 = triton_helpers.maximum(tmp59, tmp12)
tmp61 = libdevice.pow(tmp60, tmp43)
tmp62 = tmp56 + tmp61
tl.store(in_out_ptr0 + (x0), tmp36, xmask)
tl.store(out_ptr0 + (x0), tmp62, xmask)
''', device_str='cuda')
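
# This kernel folds the reduction of the previous kernel's elementwise output
# together with the two pending `where` threshold updates and the evaluation
# of the next objective sum (sum_29), writing the updated tau_lo_27 back in
# place through in_out_ptr0.
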
# kernel path: runs/run_shard_4/inductor_cache/xe/cxe754wrl5sjitrmajy2srrq6d2mjokc5tmty2qkwvhzgcsuyn5m.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, sub_93], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# X => mul
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# sub => full_default
# sub_93 => sub_93
# tau_lo_28 => where_27
# tau_m_27 => add_27
# tau_m_28 => add_28
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {})
# %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {})
# %sub_93 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_28), kwargs = {})
triton_poi_fused_add_div_mul_sub_where_3 = async_compile.triton('triton_poi_fused_add_div_mul_sub_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp7 = tmp6 - tmp4
tmp8 = tmp5 * tmp7
tmp9 = 0.0
tmp10 = tmp8 >= tmp9
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp11 + tmp25
tmp27 = tl.where(tmp10, tmp26, tmp11)
tmp28 = tmp25 * tmp1
tmp29 = tmp27 + tmp28
tmp30 = tmp2 - tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
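
# Unlike the kernels around it, this one stops at the raw differences
# sub_93 = X - tau_m_28; the clamp, square, and reduction are deferred to the
# next kernel, which consumes out_ptr0 elementwise.
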
# kernel path: runs/run_shard_4/inductor_cache/if/cif3u3drxkf366ugeo7gtgyy55gcup24bhcpwne6p454cqre4263.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, tau_lo_29, dm_30, tau_m_29, sub_96, clamp_30, truediv_30, p_m_29, sum_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
# Source node to ATen node mapping:
# X => mul
# clamp_30 => clamp_min_30
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# p_m_29 => pow_33
# sub => full_default
# sub_96 => sub_96
# sum_31 => sum_31
# tau_lo_28 => where_27
# tau_lo_29 => where_28
# tau_m_27 => add_27
# tau_m_28 => add_28
# tau_m_29 => add_29
# truediv_30 => full_default_33
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_26, %div_27), kwargs = {})
# %where_27 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_27, %add_27, %where_26), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %add_28 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_27, %div_28), kwargs = {})
# %where_28 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_28, %add_28, %where_27), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {})
# %sub_96 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_29), kwargs = {})
# %clamp_min_30 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_96, 0), kwargs = {})
# %full_default_33 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_33 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_30, %full_default_33), kwargs = {})
# %sum_31 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_33, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x0), xmask)
tmp23 = tl.load(in_ptr2 + (x0), xmask)
tmp27 = tl.load(in_out_ptr0 + (x0), xmask)
tmp28 = tl.load(in_ptr3 + (x0), xmask)
tmp48 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last')
tmp55 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp24 = tmp23 - tmp17
tmp25 = tmp24 * tmp20
tmp26 = tmp25 >= tmp1
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = tmp30 * tmp29
tmp32 = tmp31 * tmp29
tmp33 = tmp32 * tmp29
tmp34 = tmp33 * tmp29
tmp35 = tmp34 * tmp29
tmp36 = tmp35 * tmp29
tmp37 = tmp36 * tmp29
tmp38 = tmp37 * tmp29
tmp39 = tmp38 * tmp29
tmp40 = tmp39 * tmp29
tmp41 = tmp40 * tmp29
tmp42 = tmp41 * tmp29
tmp43 = tmp27 + tmp42
tmp44 = tl.where(tmp26, tmp43, tmp27)
tmp45 = tmp42 * tmp29
tmp46 = tmp44 + tmp45
tmp47 = tl.where(tmp22, tmp46, tmp44)
tmp49 = tmp48 * tmp29
tmp50 = tmp45 * tmp29
tmp51 = tmp47 + tmp50
tmp52 = tmp49 - tmp51
tmp53 = triton_helpers.maximum(tmp52, tmp1)
tmp54 = libdevice.pow(tmp53, tmp3)
tmp56 = tmp55 * tmp29
tmp57 = tmp56 - tmp51
tmp58 = triton_helpers.maximum(tmp57, tmp1)
tmp59 = libdevice.pow(tmp58, tmp3)
tmp60 = tmp54 + tmp59
tmp62 = tmp61 * tmp29
tmp63 = tmp62 - tmp51
tmp64 = triton_helpers.maximum(tmp63, tmp1)
tmp65 = libdevice.pow(tmp64, tmp3)
tmp66 = tmp60 + tmp65
tmp68 = tmp67 * tmp29
tmp69 = tmp68 - tmp51
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = libdevice.pow(tmp70, tmp3)
tmp72 = tmp66 + tmp71
tl.store(in_out_ptr0 + (x0), tmp47, xmask)
tl.store(out_ptr0 + (x0), tmp72, xmask)
''', device_str='cuda')
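
# Note on the kernels in this region: they unroll a bisection search for the
# threshold tau of an entmax-style mapping p = clamp(X - tau, 0) ** 2, where
# X is the input scaled by 0.5 (the `mul` node) and f(tau) = sum(p, -1) - 1.
# Each iteration halves the step dm, probes tau_m = tau_lo + dm, and keeps
# tau_m as the new tau_lo wherever f(tau_m) has the same sign as f_lo.
# A minimal eager-mode sketch of one step follows; the helper is illustrative
# only, is never called here, and its names are not part of the compiled graph.
def _bisect_step_sketch(X, tau_lo, dm, f_lo):
    dm = dm / 2                                    # div_* nodes (dm_k -> dm_{k+1})
    tau_m = tau_lo + dm                            # add_* nodes
    p_m = (X - tau_m).clamp_min(0).pow(2)          # sub_* / clamp_min_* / pow_* nodes
    f_m = p_m.sum(dim=-1, keepdim=True) - 1.0      # sum_* nodes, minus 1
    tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)  # where_* nodes
    return tau_lo, dm
# The kernel above (`..._4`) fuses two such where-updates of tau_lo with the
# sum-of-squares evaluation for the next probe, storing tau_lo in place
# (in_out_ptr0) and the new per-row sum in out_ptr0.
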
# kernel path: runs/run_shard_4/inductor_cache/dr/cdrunpih7elheat4jd7sxrqywzlxcqzpus4s2kn3q77rb3owzmr3.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, sub_99], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# X => mul
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# sub => full_default
# sub_99 => sub_99
# tau_lo_30 => where_29
# tau_m_29 => add_29
# tau_m_30 => add_30
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {})
# %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {})
# %sub_99 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_30), kwargs = {})
triton_poi_fused_add_div_mul_sub_where_5 = async_compile.triton('triton_poi_fused_add_div_mul_sub_where_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_where_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp7 = tmp6 - tmp4
tmp8 = tmp5 * tmp7
tmp9 = 0.0
tmp10 = tmp8 >= tmp9
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp11 + tmp27
tmp29 = tl.where(tmp10, tmp28, tmp11)
tmp30 = tmp27 * tmp1
tmp31 = tmp29 + tmp30
tmp32 = tmp2 - tmp31
tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
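
# `..._5` recomputes the where-update producing tau_lo_30 per row, then emits
# the raw residual X - tau_m_30 for every element; the clamp and square for
# this probe happen in the next kernel. The run of successive `* 0.5` ops is
# the constant-folded halving chain dm_16..dm_31, i.e. dm_31 = dm_15 * 0.5**16
# with dm_15 the value loaded from in_ptr4.
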
# kernel path: runs/run_shard_4/inductor_cache/6v/c6vga2tcnyteymwttvjbp5mmaopovt5iyiazfoj4b7d6pqavknf6.py
# Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, tau_lo_31, dm_32, tau_m_31, sub_102, clamp_32, truediv_32, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_105, clamp_33, truediv_33, p_m_32, sum_34, f_m_32, mul_33, tau_lo_33, dm_34, tau_m_33, sub_108, clamp_34, truediv_34, p_m_33, sum_35, tau_lo_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
# Source node to ATen node mapping:
# X => mul
# clamp_32 => clamp_min_32
# clamp_33 => clamp_min_33
# clamp_34 => clamp_min_34
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# f_lo => sub_7
# f_m_32 => sub_107
# mul_33 => mul_67
# p_m_31 => pow_35
# p_m_32 => pow_36
# p_m_33 => pow_37
# sub => full_default
# sub_102 => sub_102
# sub_105 => sub_105
# sub_108 => sub_108
# sum_33 => sum_33
# sum_34 => sum_34
# sum_35 => sum_35
# tau_lo_30 => where_29
# tau_lo_31 => where_30
# tau_lo_32 => where_31
# tau_lo_33 => where_32
# tau_lo_34 => where_33
# tau_m_29 => add_29
# tau_m_30 => add_30
# tau_m_31 => add_31
# tau_m_32 => add_32
# tau_m_33 => add_33
# truediv_32 => full_default_35
# truediv_33 => full_default_36
# truediv_34 => full_default_37
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %sub_7 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %add_29 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_28, %div_29), kwargs = {})
# %where_29 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_29, %add_29, %where_28), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %add_30 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_29, %div_30), kwargs = {})
# %where_30 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_30, %add_30, %where_29), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %add_31 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_30, %div_31), kwargs = {})
# %sub_102 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_31), kwargs = {})
# %clamp_min_32 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_102, 0), kwargs = {})
# %full_default_35 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_35 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_32, %full_default_35), kwargs = {})
# %sum_33 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_35, [-1]), kwargs = {})
# %where_31 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_31, %add_31, %where_30), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_31, %div_32), kwargs = {})
# %sub_105 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_32), kwargs = {})
# %clamp_min_33 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_105, 0), kwargs = {})
# %full_default_36 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_36 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_33, %full_default_36), kwargs = {})
# %sum_34 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_36, [-1]), kwargs = {})
# %sub_107 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_34, 1), kwargs = {})
# %mul_67 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_107, %sub_7), kwargs = {})
# %where_32 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_32, %add_32, %where_31), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %add_33 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_32, %div_33), kwargs = {})
# %sub_108 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_33), kwargs = {})
# %clamp_min_34 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_108, 0), kwargs = {})
# %full_default_37 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_37 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_34, %full_default_37), kwargs = {})
# %sum_35 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_37, [-1]), kwargs = {})
# %where_33 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_33, %add_33, %where_32), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr2'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0, in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x0), xmask)
tmp23 = tl.load(in_ptr2 + (x0), xmask)
tmp27 = tl.load(in_out_ptr0 + (x0), xmask)
tmp28 = tl.load(in_ptr3 + (x0), xmask)
tmp50 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last')
tmp57 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp63 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp69 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp24 = tmp23 - tmp17
tmp25 = tmp24 * tmp20
tmp26 = tmp25 >= tmp1
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = tmp30 * tmp29
tmp32 = tmp31 * tmp29
tmp33 = tmp32 * tmp29
tmp34 = tmp33 * tmp29
tmp35 = tmp34 * tmp29
tmp36 = tmp35 * tmp29
tmp37 = tmp36 * tmp29
tmp38 = tmp37 * tmp29
tmp39 = tmp38 * tmp29
tmp40 = tmp39 * tmp29
tmp41 = tmp40 * tmp29
tmp42 = tmp41 * tmp29
tmp43 = tmp42 * tmp29
tmp44 = tmp43 * tmp29
tmp45 = tmp27 + tmp44
tmp46 = tl.where(tmp26, tmp45, tmp27)
tmp47 = tmp44 * tmp29
tmp48 = tmp46 + tmp47
tmp49 = tl.where(tmp22, tmp48, tmp46)
tmp51 = tmp50 * tmp29
tmp52 = tmp47 * tmp29
tmp53 = tmp49 + tmp52
tmp54 = tmp51 - tmp53
tmp55 = triton_helpers.maximum(tmp54, tmp1)
tmp56 = libdevice.pow(tmp55, tmp3)
tmp58 = tmp57 * tmp29
tmp59 = tmp58 - tmp53
tmp60 = triton_helpers.maximum(tmp59, tmp1)
tmp61 = libdevice.pow(tmp60, tmp3)
tmp62 = tmp56 + tmp61
tmp64 = tmp63 * tmp29
tmp65 = tmp64 - tmp53
tmp66 = triton_helpers.maximum(tmp65, tmp1)
tmp67 = libdevice.pow(tmp66, tmp3)
tmp68 = tmp62 + tmp67
tmp70 = tmp69 * tmp29
tmp71 = tmp70 - tmp53
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = libdevice.pow(tmp72, tmp3)
tmp74 = tmp68 + tmp73
tmp75 = tmp74 - tmp17
tmp76 = tmp75 * tmp20
tmp77 = tmp76 >= tmp1
tmp78 = tl.where(tmp77, tmp53, tmp49)
tmp79 = tmp52 * tmp29
tmp80 = tmp78 + tmp79
tmp81 = tmp51 - tmp80
tmp82 = triton_helpers.maximum(tmp81, tmp1)
tmp83 = libdevice.pow(tmp82, tmp3)
tmp84 = tmp58 - tmp80
tmp85 = triton_helpers.maximum(tmp84, tmp1)
tmp86 = libdevice.pow(tmp85, tmp3)
tmp87 = tmp83 + tmp86
tmp88 = tmp64 - tmp80
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = libdevice.pow(tmp89, tmp3)
tmp91 = tmp87 + tmp90
tmp92 = tmp70 - tmp80
tmp93 = triton_helpers.maximum(tmp92, tmp1)
tmp94 = libdevice.pow(tmp93, tmp3)
tmp95 = tmp91 + tmp94
tmp96 = tmp95 - tmp17
tmp97 = tmp96 * tmp20
tmp98 = tmp97 >= tmp1
tmp99 = tl.where(tmp98, tmp80, tmp78)
tmp100 = tmp79 * tmp29
tmp101 = tmp99 + tmp100
tmp102 = tmp51 - tmp101
tmp103 = triton_helpers.maximum(tmp102, tmp1)
tmp104 = libdevice.pow(tmp103, tmp3)
tmp105 = tmp58 - tmp101
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = libdevice.pow(tmp106, tmp3)
tmp108 = tmp104 + tmp107
tmp109 = tmp64 - tmp101
tmp110 = triton_helpers.maximum(tmp109, tmp1)
tmp111 = libdevice.pow(tmp110, tmp3)
tmp112 = tmp108 + tmp111
tmp113 = tmp70 - tmp101
tmp114 = triton_helpers.maximum(tmp113, tmp1)
tmp115 = libdevice.pow(tmp114, tmp3)
tmp116 = tmp112 + tmp115
tmp117 = tmp116 - tmp17
tmp118 = tmp117 * tmp20
tmp119 = tmp118 >= tmp1
tmp120 = tl.where(tmp119, tmp101, tmp99)
tl.store(in_out_ptr2 + (x0), tmp120, xmask)
''', device_str='cuda')
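
# `..._6` fuses three full bisection iterations (probes tau_m_31..tau_m_33):
# after replaying the two pending where-updates from the loaded sums, it keeps
# each probe's clamp/square/row-sum entirely in registers, applies the three
# sign-test updates back to back, and stores only the final tau_lo_34
# (in_out_ptr2). In eager terms this is roughly (a sketch reusing the helper
# defined above):
#     for _ in range(3):
#         tau_lo, dm = _bisect_step_sketch(X, tau_lo, dm, f_lo)
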
# kernel path: runs/run_shard_4/inductor_cache/qw/cqwjirql6abn7imghsni2jlkuybk4xkokodof2rpil4tssyex4uw.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, sub_111, clamp_35, truediv_35, p_m_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow]
# Source node to ATen node mapping:
# X => mul
# clamp_35 => clamp_min_35
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# p_m_34 => pow_38
# sub => full_default
# sub_111 => sub_111
# tau_m_34 => add_34
# truediv_35 => full_default_38
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {})
# %sub_111 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_34), kwargs = {})
# %clamp_min_35 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_111, 0), kwargs = {})
# %full_default_38 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_38 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_35, %full_default_38), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_7 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp3 + tmp24
tmp26 = tmp2 - tmp25
tmp27 = 0.0
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = 2.0
tmp30 = libdevice.pow(tmp28, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
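
# `..._7` is the elementwise half of iteration 34: it folds twenty halvings
# (dm_16..dm_35, so dm_35 = dm_15 * 0.5**20) into the step, forms
# tau_m_34 = tau_lo_34 + dm_35, and stores p_m_34 = clamp(X - tau_m_34, 0)**2
# per element; the row-wise sum and the where-update follow in `..._8`.
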
# kernel path: runs/run_shard_4/inductor_cache/3q/c3qnhz5l3skrhqr43bpfxlmkw6kumz7pxqtyenon3vddp3cudnfx.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, tau_lo_35], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# tau_lo_35 => where_34
# tau_m_34 => add_34
# Graph fragment:
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_33, %div_34), kwargs = {})
# %where_34 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_34, %add_34, %where_33), kwargs = {})
triton_poi_fused_add_div_where_8 = async_compile.triton('triton_poi_fused_add_div_where_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0), xmask)
tmp14 = tl.load(in_out_ptr0 + (x0), xmask)
tmp15 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = tmp9 - tmp7
tmp11 = tmp8 * tmp10
tmp12 = 0.0
tmp13 = tmp11 >= tmp12
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp19 * tmp16
tmp21 = tmp20 * tmp16
tmp22 = tmp21 * tmp16
tmp23 = tmp22 * tmp16
tmp24 = tmp23 * tmp16
tmp25 = tmp24 * tmp16
tmp26 = tmp25 * tmp16
tmp27 = tmp26 * tmp16
tmp28 = tmp27 * tmp16
tmp29 = tmp28 * tmp16
tmp30 = tmp29 * tmp16
tmp31 = tmp30 * tmp16
tmp32 = tmp31 * tmp16
tmp33 = tmp32 * tmp16
tmp34 = tmp33 * tmp16
tmp35 = tmp34 * tmp16
tmp36 = tmp35 * tmp16
tmp37 = tmp14 + tmp36
tmp38 = tl.where(tmp13, tmp37, tmp14)
tl.store(in_out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
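
# `..._8` is the reduction half of iteration 34: it sums the four p_m_34
# entries of each row, forms f_m = sum - 1, tests its sign against
# f_lo = in_ptr1 - 1, rebuilds dm_35 via the same folded halving chain, and
# advances tau_lo in place (in_out_ptr0) where the test passes, yielding
# tau_lo_35.
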
# kernel path: runs/run_shard_4/inductor_cache/65/c65c6rqbfpxnnf52lttfmjffp6c57rq5vh5zishetax476bzxbyn.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, sub_114, clamp_36, truediv_36, p_m_35], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow]
# Source node to ATen node mapping:
# X => mul
# clamp_36 => clamp_min_36
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# p_m_35 => pow_39
# sub => full_default
# sub_114 => sub_114
# tau_m_35 => add_35
# truediv_36 => full_default_39
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {})
# %sub_114 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_35), kwargs = {})
# %clamp_min_36 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_114, 0), kwargs = {})
# %full_default_39 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_39 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_36, %full_default_39), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_9 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp3 + tmp25
tmp27 = tmp2 - tmp26
tmp28 = 0.0
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = 2.0
tmp31 = libdevice.pow(tmp29, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
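
# `..._9` repeats the elementwise pattern of `..._7` for iteration 35, with
# one more halving folded into the chain (dm_36 = dm_15 * 0.5**21), storing
# p_m_35 = clamp(X - tau_m_35, 0)**2.
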
# kernel path: runs/run_shard_4/inductor_cache/hc/chcgwhxqzahejjnkomdddn3fpfvhgpal4i42dvbnrcw5fjtsuuip.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, tau_lo_36], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# tau_lo_36 => where_35
# tau_m_35 => add_35
# Graph fragment:
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %add_35 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_34, %div_35), kwargs = {})
# %where_35 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_35, %add_35, %where_34), kwargs = {})
triton_poi_fused_add_div_where_10 = async_compile.triton('triton_poi_fused_add_div_where_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (x0), xmask)
tmp14 = tl.load(in_out_ptr0 + (x0), xmask)
tmp15 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = tmp9 - tmp7
tmp11 = tmp8 * tmp10
tmp12 = 0.0
tmp13 = tmp11 >= tmp12
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp19 * tmp16
tmp21 = tmp20 * tmp16
tmp22 = tmp21 * tmp16
tmp23 = tmp22 * tmp16
tmp24 = tmp23 * tmp16
tmp25 = tmp24 * tmp16
tmp26 = tmp25 * tmp16
tmp27 = tmp26 * tmp16
tmp28 = tmp27 * tmp16
tmp29 = tmp28 * tmp16
tmp30 = tmp29 * tmp16
tmp31 = tmp30 * tmp16
tmp32 = tmp31 * tmp16
tmp33 = tmp32 * tmp16
tmp34 = tmp33 * tmp16
tmp35 = tmp34 * tmp16
tmp36 = tmp35 * tmp16
tmp37 = tmp36 * tmp16
tmp38 = tmp14 + tmp37
tmp39 = tl.where(tmp13, tmp38, tmp14)
tl.store(in_out_ptr0 + (x0), tmp39, xmask)
''', device_str='cuda')
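
# `..._10` mirrors `..._8` for iteration 35: row-wise sum of p_m_35, sign test
# against f_lo, and the in-place where-update producing tau_lo_36.
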
# kernel path: runs/run_shard_4/inductor_cache/hk/chkgkmcorveaoty2aubj4aiqs7c4icysi6i7uztapg7qoahqwomz.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, sub_117, clamp_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp]
# Source node to ATen node mapping:
# X => mul
# clamp_37 => clamp_min_37
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# sub => full_default
# sub_117 => sub_117
# tau_m_36 => add_36
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {})
# %sub_117 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_36), kwargs = {})
# %clamp_min_37 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_117, 0), kwargs = {})
triton_poi_fused_add_clamp_div_mul_sub_11 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp3 + tmp26
tmp28 = tmp2 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
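
# `..._11` begins iteration 36 but stores only the clamped residual
# clamp(X - tau_m_36, 0) (with dm_37 = dm_15 * 0.5**22 folded into the chain);
# the square and row-wise sum for this probe are presumably fused into a
# later kernel.
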
# kernel path: runs/run_shard_4/inductor_cache/6j/c6jx7jobgsfa3dnpqdh4py4h2yjwqcnyfrnfxf4y2pamiu6cwdpl.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, tau_lo_37], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# tau_lo_37 => where_36
# tau_m_36 => add_36
# Graph fragment:
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %add_36 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_35, %div_36), kwargs = {})
# %where_36 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_36, %add_36, %where_35), kwargs = {})
triton_poi_fused_add_div_where_12 = async_compile.triton('triton_poi_fused_add_div_where_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (x0), xmask)
tmp19 = tl.load(in_out_ptr0 + (x0), xmask)
tmp20 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 2.0
tmp2 = libdevice.pow(tmp0, tmp1)
tmp4 = libdevice.pow(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = libdevice.pow(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = libdevice.pow(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp15 = tmp14 - tmp12
tmp16 = tmp13 * tmp15
tmp17 = 0.0
tmp18 = tmp16 >= tmp17
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp24 * tmp21
tmp26 = tmp25 * tmp21
tmp27 = tmp26 * tmp21
tmp28 = tmp27 * tmp21
tmp29 = tmp28 * tmp21
tmp30 = tmp29 * tmp21
tmp31 = tmp30 * tmp21
tmp32 = tmp31 * tmp21
tmp33 = tmp32 * tmp21
tmp34 = tmp33 * tmp21
tmp35 = tmp34 * tmp21
tmp36 = tmp35 * tmp21
tmp37 = tmp36 * tmp21
tmp38 = tmp37 * tmp21
tmp39 = tmp38 * tmp21
tmp40 = tmp39 * tmp21
tmp41 = tmp40 * tmp21
tmp42 = tmp41 * tmp21
tmp43 = tmp42 * tmp21
tmp44 = tmp19 + tmp43
tmp45 = tl.where(tmp18, tmp44, tmp19)
tl.store(in_out_ptr0 + (x0), tmp45, xmask)
''', device_str='cuda')
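# Annotation: this kernel is the matching bisection update for the midpoint
# evaluated above. It recomputes f(tau_m) = sum(p_m**2) - 1 over the 4-wide
# last axis, compares its sign against f(tau_lo) = s_lo - 1 (in_ptr1 holds the
# row sum s_lo), and advances the lower bound only where the signs agree.
# Eager sketch with illustrative names:
import torch

def _ref_update_tau_lo(p_m, s_lo, tau_lo, dm0, halvings=22):
    f_m = p_m.pow(2).sum(dim=-1) - 1.0        # the pow/sum/sub chain
    f_lo = s_lo - 1.0                         # fixed sign reference
    tau_m = tau_lo + dm0 * 0.5 ** halvings    # same halving chain as above
    return torch.where(f_m * f_lo >= 0, tau_m, tau_lo)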
# kernel path: runs/run_shard_4/inductor_cache/dg/cdgzbjixzgjbzoonwf327zqettjzik6jiaut4vfckxwzzrjpw4ci.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, sub_120, clamp_38], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp]
# Source node to ATen node mapping:
# X => mul
# clamp_38 => clamp_min_38
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# sub => full_default
# sub_120 => sub_120
# tau_m_37 => add_37
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
# %sub_120 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_37), kwargs = {})
# %clamp_min_38 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_120, 0), kwargs = {})
triton_poi_fused_add_clamp_div_mul_sub_13 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_sub_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_sub_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp3 + tmp27
tmp29 = tmp2 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
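# Annotation: the long `* 0.5` chains in these kernels are just the unrolled
# form of the dm_k => div_k graph nodes, one multiply per node (23 in the
# kernel above). Numerically each chain equals a single scale by a power of
# two; multiplying by 0.5 only decrements the exponent, so the identity is
# exact in floating point, barring underflow:
def _halving_chain_equals_single_scale(dm0=3.0, halvings=23):
    chain = dm0
    for _ in range(halvings):
        chain *= 0.5
    assert chain == dm0 * 0.5 ** halvings  # exact for normal floats
    return chain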
# kernel path: runs/run_shard_4/inductor_cache/f2/cf2smd7msnlfnwkswpdfa2pro4t25uk7k6sqqtuhugmitppzjw2h.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, tau_lo_38], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# tau_lo_38 => where_37
# tau_m_37 => add_37
# Graph fragment:
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %add_37 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_36, %div_37), kwargs = {})
# %where_37 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_37, %add_37, %where_36), kwargs = {})
triton_poi_fused_add_div_where_14 = async_compile.triton('triton_poi_fused_add_div_where_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + (x0), xmask)
tmp19 = tl.load(in_out_ptr0 + (x0), xmask)
tmp20 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 2.0
tmp2 = libdevice.pow(tmp0, tmp1)
tmp4 = libdevice.pow(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = libdevice.pow(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = libdevice.pow(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp15 = tmp14 - tmp12
tmp16 = tmp13 * tmp15
tmp17 = 0.0
tmp18 = tmp16 >= tmp17
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp24 * tmp21
tmp26 = tmp25 * tmp21
tmp27 = tmp26 * tmp21
tmp28 = tmp27 * tmp21
tmp29 = tmp28 * tmp21
tmp30 = tmp29 * tmp21
tmp31 = tmp30 * tmp21
tmp32 = tmp31 * tmp21
tmp33 = tmp32 * tmp21
tmp34 = tmp33 * tmp21
tmp35 = tmp34 * tmp21
tmp36 = tmp35 * tmp21
tmp37 = tmp36 * tmp21
tmp38 = tmp37 * tmp21
tmp39 = tmp38 * tmp21
tmp40 = tmp39 * tmp21
tmp41 = tmp40 * tmp21
tmp42 = tmp41 * tmp21
tmp43 = tmp42 * tmp21
tmp44 = tmp43 * tmp21
tmp45 = tmp19 + tmp44
tmp46 = tl.where(tmp18, tmp45, tmp19)
tl.store(in_out_ptr0 + (x0), tmp46, xmask)
''', device_str='cuda')
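# Annotation: `mutated_arg_names: ['in_out_ptr0']` marks tau_lo as a buffer
# this kernel both reads and writes, so each unrolled bisection step updates
# it in place instead of allocating a fresh tensor. A self-contained sketch of
# the same read-modify-write pattern (an illustrative kernel, not the
# generated one):
import torch
import triton
import triton.language as tl

@triton.jit
def _inplace_add_kernel(in_out_ptr, inc_ptr, n, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    cur = tl.load(in_out_ptr + offs, mask=mask)        # read current value
    inc = tl.load(inc_ptr + offs, mask=mask)
    tl.store(in_out_ptr + offs, cur + inc, mask=mask)  # overwrite in place

def _inplace_add(buf, inc):
    # usage sketch: buf is mutated, mirroring how tau_lo is carried across steps
    n = buf.numel()
    _inplace_add_kernel[(triton.cdiv(n, 128),)](buf, inc, n, BLOCK=128)
    return buf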
# kernel path: runs/run_shard_4/inductor_cache/m6/cm6g7ftzm55nml3oy3bwbqmue7bg6inetry6yvug7w4kb6ev5rty.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, sub_123], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
# X => mul
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# sub => full_default
# sub_123 => sub_123
# tau_m_38 => add_38
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
# %sub_123 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_38), kwargs = {})
triton_poi_fused_add_div_mul_sub_15 = async_compile.triton('triton_poi_fused_add_div_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp3 + tmp28
tmp30 = tmp2 - tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
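# Annotation: unlike the `..._13` kernel, this one stores the raw difference
# X - tau_m and leaves the clamp to its consumer: the `..._16` kernel below
# applies triton_helpers.maximum(..., 0.0) itself before squaring, fusing the
# clamp into the reduction. Both orderings give the same f(tau_m); a sketch of
# the split (illustrative names):
import torch

def _ref_split_clamp(x, tau_m):
    diff = x * 0.5 - tau_m                           # this kernel: sub only
    return diff.clamp(min=0).pow(2).sum(-1) - 1.0    # consumer: clamp+pow+sum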
# kernel path: runs/run_shard_4/inductor_cache/it/citgidtovhh7ieg5w6kgdaztr74d5qimxq5xchpfcretvgwraeaz.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, tau_lo_39], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# tau_lo_39 => where_38
# tau_m_38 => add_38
# Graph fragment:
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %add_38 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_37, %div_38), kwargs = {})
# %where_38 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_38, %add_38, %where_37), kwargs = {})
triton_poi_fused_add_div_where_16 = async_compile.triton('triton_poi_fused_add_div_where_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x0), xmask)
tmp23 = tl.load(in_out_ptr0 + (x0), xmask)
tmp24 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tmp30 = tmp29 * tmp25
tmp31 = tmp30 * tmp25
tmp32 = tmp31 * tmp25
tmp33 = tmp32 * tmp25
tmp34 = tmp33 * tmp25
tmp35 = tmp34 * tmp25
tmp36 = tmp35 * tmp25
tmp37 = tmp36 * tmp25
tmp38 = tmp37 * tmp25
tmp39 = tmp38 * tmp25
tmp40 = tmp39 * tmp25
tmp41 = tmp40 * tmp25
tmp42 = tmp41 * tmp25
tmp43 = tmp42 * tmp25
tmp44 = tmp43 * tmp25
tmp45 = tmp44 * tmp25
tmp46 = tmp45 * tmp25
tmp47 = tmp46 * tmp25
tmp48 = tmp47 * tmp25
tmp49 = tmp48 * tmp25
tmp50 = tmp23 + tmp49
tmp51 = tl.where(tmp22, tmp50, tmp23)
tl.store(in_out_ptr0 + (x0), tmp51, xmask)
''', device_str='cuda')
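# Annotation: f(tau) = sum(clamp(X - tau, 0)**2) - 1 is monotone decreasing in
# tau, so the test `f(tau_m) * f(tau_lo) >= 0` is a plain sign-test bisection.
# f_lo is computed once upstream (f_lo => sub_7) and never refreshed, which is
# sound because tau_lo only ever moves to points where f keeps that sign. A
# scalar sketch of the same scheme:
def _ref_bisect(f, lo, dm0, steps):
    f_lo = f(lo)                 # fixed sign reference, as in the kernels
    dm = dm0
    for _ in range(steps):
        dm *= 0.5
        mid = lo + dm
        if f(mid) * f_lo >= 0:   # same sign as f_lo: the root lies above mid
            lo = mid
    return lo + dm * 0.5         # midpoint of the final bracket

# usage sketch: _ref_bisect(lambda t: 2.0 - t * t, 1.0, 1.0, 30) ~= sqrt(2)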
# kernel path: runs/run_shard_4/inductor_cache/ow/cowrvlpr3aq5lvglsdero5kvdkcaqmnxh7ylqy6wg5asbxkwbugf.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, sub_126], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
# X => mul
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# sub => full_default
# sub_126 => sub_126
# tau_m_39 => add_39
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
# %sub_126 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_39), kwargs = {})
triton_poi_fused_add_div_mul_sub_17 = async_compile.triton('triton_poi_fused_add_div_mul_sub_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp28 * tmp1
tmp30 = tmp3 + tmp29
tmp31 = tmp2 - tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
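# Annotation: in the 256-element kernels, `x1 = xindex // 4` maps each element
# to its row, so tau_lo and dm are broadcast loads (one value shared by four
# elements); `eviction_policy='evict_last'` hints that these reused values
# should be kept in cache. The same access pattern in plain indexing
# (illustrative):
import torch

def _ref_broadcast_rows(x_flat, tau_rows):
    # x_flat: 256 elements; tau_rows: 64 per-row values
    x2 = torch.arange(x_flat.numel())
    x1 = x2 // 4                        # row index shared by 4 elements
    return x_flat[x2] * 0.5 - tau_rows[x1]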
# kernel path: runs/run_shard_4/inductor_cache/z4/cz4ddiop67cjhvnymyeyxeouq7iyjrkpudsvnur2jqxca35zlu35.py
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, tau_lo_40], Original ATen: [aten.div, aten.add, aten.where]
# Source node to ATen node mapping:
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# tau_lo_40 => where_39
# tau_m_39 => add_39
# Graph fragment:
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %add_39 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_38, %div_39), kwargs = {})
# %where_39 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_39, %add_39, %where_38), kwargs = {})
triton_poi_fused_add_div_where_18 = async_compile.triton('triton_poi_fused_add_div_where_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_where_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x0), xmask)
tmp23 = tl.load(in_out_ptr0 + (x0), xmask)
tmp24 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tmp30 = tmp29 * tmp25
tmp31 = tmp30 * tmp25
tmp32 = tmp31 * tmp25
tmp33 = tmp32 * tmp25
tmp34 = tmp33 * tmp25
tmp35 = tmp34 * tmp25
tmp36 = tmp35 * tmp25
tmp37 = tmp36 * tmp25
tmp38 = tmp37 * tmp25
tmp39 = tmp38 * tmp25
tmp40 = tmp39 * tmp25
tmp41 = tmp40 * tmp25
tmp42 = tmp41 * tmp25
tmp43 = tmp42 * tmp25
tmp44 = tmp43 * tmp25
tmp45 = tmp44 * tmp25
tmp46 = tmp45 * tmp25
tmp47 = tmp46 * tmp25
tmp48 = tmp47 * tmp25
tmp49 = tmp48 * tmp25
tmp50 = tmp49 * tmp25
tmp51 = tmp23 + tmp50
tmp52 = tl.where(tmp22, tmp51, tmp23)
tl.store(in_out_ptr0 + (x0), tmp52, xmask)
''', device_str='cuda')
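# Annotation: the `..._16` and `..._18` kernels differ only in one extra
# `* 0.5`: every bisection iteration is traced as distinct graph nodes, so
# Inductor specializes one kernel per unrolled step rather than emitting a
# single parameterized kernel. A parameterized equivalent would hoist the step
# count into an argument (illustrative names):
import torch

def _ref_step(p_m_raw, s_lo, tau_lo, dm0, halvings):
    f_m = p_m_raw.clamp(min=0).pow(2).sum(dim=-1) - 1.0  # maximum + pow + sum
    tau_m = tau_lo + dm0 * 0.5 ** halvings               # per-step chain length
    return torch.where(f_m * (s_lo - 1.0) >= 0, tau_m, tau_lo)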
# kernel path: runs/run_shard_4/inductor_cache/xp/cxphrrfdmolvfd4qixifhmbpdrlsdb6np4qopmkccumu7xs7u2vv.py
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_129], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
# Source node to ATen node mapping:
# X => mul
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# sub => full_default
# sub_129 => sub_129
# tau_m_40 => add_40
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %sub_129 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_40), kwargs = {})
triton_poi_fused_add_div_mul_sub_19 = async_compile.triton('triton_poi_fused_add_div_mul_sub_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp28 * tmp1
tmp30 = tmp29 * tmp1
tmp31 = tmp3 + tmp30
tmp32 = tmp2 - tmp31
tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
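# Annotation: taken together, these unrolled kernels implement a bisection
# whose fixed point satisfies sum(clamp(x/2 - tau, 0)**2) == 1 per row -- the
# clamp/square/sum structure of an entmax-1.5-style mapping. The fused kernel
# that follows folds the remaining steps (tau_m_41..tau_m_49) and their
# reductions into a single launch. An eager reference loop under that reading
# (the initialization of tau_lo and dm is an assumption here; only the loop
# body mirrors the kernels):
import torch

def _ref_bisect_loop(x, n_iter=50):
    X = x * 0.5
    tau_lo = X.max(dim=-1, keepdim=True).values - 1.0   # assumed bracket start
    dm = torch.ones_like(tau_lo)                        # assumed initial width
    f_lo = (X - tau_lo).clamp(min=0).pow(2).sum(-1, keepdim=True) - 1.0
    for _ in range(n_iter):
        dm = dm / 2
        tau_m = tau_lo + dm
        f_m = (X - tau_m).clamp(min=0).pow(2).sum(-1, keepdim=True) - 1.0
        tau_lo = torch.where(f_m * f_lo >= 0, tau_m, tau_lo)
    return (X - (tau_lo + dm / 2)).clamp(min=0).pow(2)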
# kernel path: runs/run_shard_4/inductor_cache/wa/cwaa5hzx7dhpx3ghsy7xh5ykl5hdag5jzcuoogoqdl4kgv7awvbb.py
# Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, tau_lo_41, dm_42, tau_m_41, sub_132, clamp_42, truediv_42, p_m_41, sum_43, f_m_41, mul_42, tau_lo_42, dm_43, tau_m_42, sub_135, clamp_43, truediv_43, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43, dm_44, tau_m_43, sub_138, clamp_44, truediv_44, p_m_43, sum_45, f_m_43, tau_lo_44, dm_45, tau_m_44, sub_141, clamp_45, truediv_45, p_m_44, sum_46, tau_lo_45, dm_46, tau_m_45, sub_144, clamp_46, truediv_46, p_m_45, sum_47, tau_lo_46, dm_47, tau_m_46, sub_147, clamp_47, truediv_47, p_m_46, sum_48, tau_lo_47, dm_48, tau_m_47, sub_150, clamp_48, truediv_48, p_m_47, sum_49, tau_lo_48, dm_49, tau_m_48, sub_153, clamp_49, truediv_49, p_m_48, sum_50, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, sum_52], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
# Source node to ATen node mapping:
# X => mul
# clamp_42 => clamp_min_42
# clamp_43 => clamp_min_43
# clamp_44 => clamp_min_44
# clamp_45 => clamp_min_45
# clamp_46 => clamp_min_46
# clamp_47 => clamp_min_47
# clamp_48 => clamp_min_48
# clamp_49 => clamp_min_49
# clamp_50 => clamp_min_50
# dm_16 => div_15
# dm_17 => div_16
# dm_18 => div_17
# dm_19 => div_18
# dm_20 => div_19
# dm_21 => div_20
# dm_22 => div_21
# dm_23 => div_22
# dm_24 => div_23
# dm_25 => div_24
# dm_26 => div_25
# dm_27 => div_26
# dm_28 => div_27
# dm_29 => div_28
# dm_30 => div_29
# dm_31 => div_30
# dm_32 => div_31
# dm_33 => div_32
# dm_34 => div_33
# dm_35 => div_34
# dm_36 => div_35
# dm_37 => div_36
# dm_38 => div_37
# dm_39 => div_38
# dm_40 => div_39
# dm_41 => div_40
# dm_42 => div_41
# dm_43 => div_42
# dm_44 => div_43
# dm_45 => div_44
# dm_46 => div_45
# dm_47 => div_46
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# f_lo => sub_7
# f_m_41 => sub_134
# f_m_42 => sub_137
# f_m_43 => sub_140
# mul_42 => mul_85
# mul_43 => mul_87
# p_m_41 => pow_45
# p_m_42 => pow_46
# p_m_43 => pow_47
# p_m_44 => pow_48
# p_m_45 => pow_49
# p_m_46 => pow_50
# p_m_47 => pow_51
# p_m_48 => pow_52
# p_m_49 => pow_53
# sub => full_default
# sub_132 => sub_132
# sub_135 => sub_135
# sub_138 => sub_138
# sub_141 => sub_141
# sub_144 => sub_144
# sub_147 => sub_147
# sub_150 => sub_150
# sub_153 => sub_153
# sub_156 => sub_156
# sum_43 => sum_43
# sum_44 => sum_44
# sum_45 => sum_45
# sum_46 => sum_46
# sum_47 => sum_47
# sum_48 => sum_48
# sum_49 => sum_49
# sum_50 => sum_50
# sum_52 => sum_52
# tau_lo_41 => where_40
# tau_lo_42 => where_41
# tau_lo_43 => where_42
# tau_lo_44 => where_43
# tau_lo_45 => where_44
# tau_lo_46 => where_45
# tau_lo_47 => where_46
# tau_lo_48 => where_47
# tau_lo_49 => where_48
# tau_m_40 => add_40
# tau_m_41 => add_41
# tau_m_42 => add_42
# tau_m_43 => add_43
# tau_m_44 => add_44
# tau_m_45 => add_45
# tau_m_46 => add_46
# tau_m_47 => add_47
# tau_m_48 => add_48
# tau_m_49 => add_49
# truediv_42 => full_default_45
# truediv_43 => full_default_46
# truediv_44 => full_default_47
# truediv_45 => full_default_48
# truediv_46 => full_default_49
# truediv_47 => full_default_50
# truediv_48 => full_default_51
# truediv_49 => full_default_52
# truediv_50 => full_default_53
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %sub_7 : [num_users=49] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, 1), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_14, 2), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_15, 2), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_16, 2), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_17, 2), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_18, 2), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_19, 2), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_20, 2), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_21, 2), kwargs = {})
# %div_23 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_22, 2), kwargs = {})
# %div_24 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_23, 2), kwargs = {})
# %div_25 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_24, 2), kwargs = {})
# %div_26 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_25, 2), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_26, 2), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_27, 2), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_28, 2), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_29, 2), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_30, 2), kwargs = {})
# %div_32 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_31, 2), kwargs = {})
# %div_33 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_32, 2), kwargs = {})
# %div_34 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_33, 2), kwargs = {})
# %div_35 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_34, 2), kwargs = {})
# %div_36 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_35, 2), kwargs = {})
# %div_37 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_36, 2), kwargs = {})
# %div_38 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_37, 2), kwargs = {})
# %div_39 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_38, 2), kwargs = {})
# %div_40 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_39, 2), kwargs = {})
# %add_40 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_39, %div_40), kwargs = {})
# %where_40 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_40, %add_40, %where_39), kwargs = {})
# %div_41 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_40, 2), kwargs = {})
# %add_41 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_40, %div_41), kwargs = {})
# %sub_132 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_41), kwargs = {})
# %clamp_min_42 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_132, 0), kwargs = {})
# %full_default_45 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_45 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_42, %full_default_45), kwargs = {})
# %sum_43 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_45, [-1]), kwargs = {})
# %sub_134 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_43, 1), kwargs = {})
# %mul_85 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_134, %sub_7), kwargs = {})
# %where_41 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_41, %add_41, %where_40), kwargs = {})
# %div_42 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_41, 2), kwargs = {})
# %add_42 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_41, %div_42), kwargs = {})
# %sub_135 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_42), kwargs = {})
# %clamp_min_43 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_135, 0), kwargs = {})
# %full_default_46 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_46 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_43, %full_default_46), kwargs = {})
# %sum_44 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_46, [-1]), kwargs = {})
# %sub_137 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_44, 1), kwargs = {})
# %mul_87 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_137, %sub_7), kwargs = {})
# %where_42 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_42, %add_42, %where_41), kwargs = {})
# %div_43 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_42, 2), kwargs = {})
# %add_43 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_42, %div_43), kwargs = {})
# %sub_138 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_43), kwargs = {})
# %clamp_min_44 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_138, 0), kwargs = {})
# %full_default_47 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_47 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_44, %full_default_47), kwargs = {})
# %sum_45 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_47, [-1]), kwargs = {})
# %sub_140 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_45, 1), kwargs = {})
# %where_43 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_43, %add_43, %where_42), kwargs = {})
# %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {})
# %add_44 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_43, %div_44), kwargs = {})
# %sub_141 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_44), kwargs = {})
# %clamp_min_45 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_141, 0), kwargs = {})
# %full_default_48 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_48 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_45, %full_default_48), kwargs = {})
# %sum_46 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_48, [-1]), kwargs = {})
# %where_44 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_44, %add_44, %where_43), kwargs = {})
# %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {})
# %add_45 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_44, %div_45), kwargs = {})
# %sub_144 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_45), kwargs = {})
# %clamp_min_46 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_144, 0), kwargs = {})
# %full_default_49 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_49 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_46, %full_default_49), kwargs = {})
# %sum_47 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_49, [-1]), kwargs = {})
# %where_45 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_45, %add_45, %where_44), kwargs = {})
# %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {})
# %add_46 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_45, %div_46), kwargs = {})
# %sub_147 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_46), kwargs = {})
# %clamp_min_47 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_147, 0), kwargs = {})
# %full_default_50 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_50 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_47, %full_default_50), kwargs = {})
# %sum_48 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_50, [-1]), kwargs = {})
# %where_46 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_46, %add_46, %where_45), kwargs = {})
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {})
# %sub_150 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_47), kwargs = {})
# %clamp_min_48 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_150, 0), kwargs = {})
# %full_default_51 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_51 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_48, %full_default_51), kwargs = {})
# %sum_49 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_51, [-1]), kwargs = {})
# %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {})
# %sub_153 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_48), kwargs = {})
# %clamp_min_49 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_153, 0), kwargs = {})
# %full_default_52 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_52 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_49, %full_default_52), kwargs = {})
# %sum_50 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_52, [-1]), kwargs = {})
# %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_156 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_156, 0), kwargs = {})
# %full_default_53 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_53 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_50, %full_default_53), kwargs = {})
# %sum_52 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_53, [-1]), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr4'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0, in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr7, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (x0), xmask)
tmp23 = tl.load(in_out_ptr0 + (x0), xmask)
tmp24 = tl.load(in_ptr2 + (x0), xmask)
tmp56 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp67 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp73 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tmp30 = tmp29 * tmp25
tmp31 = tmp30 * tmp25
tmp32 = tmp31 * tmp25
tmp33 = tmp32 * tmp25
tmp34 = tmp33 * tmp25
tmp35 = tmp34 * tmp25
tmp36 = tmp35 * tmp25
tmp37 = tmp36 * tmp25
tmp38 = tmp37 * tmp25
tmp39 = tmp38 * tmp25
tmp40 = tmp39 * tmp25
tmp41 = tmp40 * tmp25
tmp42 = tmp41 * tmp25
tmp43 = tmp42 * tmp25
tmp44 = tmp43 * tmp25
tmp45 = tmp44 * tmp25
tmp46 = tmp45 * tmp25
tmp47 = tmp46 * tmp25
tmp48 = tmp47 * tmp25
tmp49 = tmp48 * tmp25
tmp50 = tmp49 * tmp25
tmp51 = tmp50 * tmp25
tmp52 = tmp23 + tmp51
tmp53 = tl.where(tmp22, tmp52, tmp23)
tmp54 = tmp51 * tmp25
tmp55 = tmp53 + tmp54
tmp57 = tmp56 * tmp25
tmp58 = tmp57 - tmp55
tmp59 = triton_helpers.maximum(tmp58, tmp1)
tmp60 = libdevice.pow(tmp59, tmp3)
tmp62 = tmp61 * tmp25
tmp63 = tmp62 - tmp55
tmp64 = triton_helpers.maximum(tmp63, tmp1)
tmp65 = libdevice.pow(tmp64, tmp3)
tmp66 = tmp60 + tmp65
tmp68 = tmp67 * tmp25
tmp69 = tmp68 - tmp55
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = libdevice.pow(tmp70, tmp3)
tmp72 = tmp66 + tmp71
tmp74 = tmp73 * tmp25
tmp75 = tmp74 - tmp55
tmp76 = triton_helpers.maximum(tmp75, tmp1)
tmp77 = libdevice.pow(tmp76, tmp3)
tmp78 = tmp72 + tmp77
tmp79 = tmp78 - tmp17
tmp80 = tmp79 * tmp20
tmp81 = tmp80 >= tmp1
tmp82 = tl.where(tmp81, tmp55, tmp53)
tmp83 = tmp54 * tmp25
tmp84 = tmp82 + tmp83
tmp85 = tmp57 - tmp84
tmp86 = triton_helpers.maximum(tmp85, tmp1)
tmp87 = libdevice.pow(tmp86, tmp3)
tmp88 = tmp62 - tmp84
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = libdevice.pow(tmp89, tmp3)
tmp91 = tmp87 + tmp90
tmp92 = tmp68 - tmp84
tmp93 = triton_helpers.maximum(tmp92, tmp1)
tmp94 = libdevice.pow(tmp93, tmp3)
tmp95 = tmp91 + tmp94
tmp96 = tmp74 - tmp84
tmp97 = triton_helpers.maximum(tmp96, tmp1)
tmp98 = libdevice.pow(tmp97, tmp3)
tmp99 = tmp95 + tmp98
tmp100 = tmp99 - tmp17
tmp101 = tmp100 * tmp20
tmp102 = tmp101 >= tmp1
tmp103 = tl.where(tmp102, tmp84, tmp82)
tmp104 = tmp83 * tmp25
tmp105 = tmp103 + tmp104
tmp106 = tmp57 - tmp105
tmp107 = triton_helpers.maximum(tmp106, tmp1)
tmp108 = libdevice.pow(tmp107, tmp3)
tmp109 = tmp62 - tmp105
tmp110 = triton_helpers.maximum(tmp109, tmp1)
tmp111 = libdevice.pow(tmp110, tmp3)
tmp112 = tmp108 + tmp111
tmp113 = tmp68 - tmp105
tmp114 = triton_helpers.maximum(tmp113, tmp1)
tmp115 = libdevice.pow(tmp114, tmp3)
tmp116 = tmp112 + tmp115
tmp117 = tmp74 - tmp105
tmp118 = triton_helpers.maximum(tmp117, tmp1)
tmp119 = libdevice.pow(tmp118, tmp3)
tmp120 = tmp116 + tmp119
tmp121 = tmp120 - tmp17
tmp122 = tmp121 * tmp20
tmp123 = tmp122 >= tmp1
tmp124 = tl.where(tmp123, tmp105, tmp103)
tmp125 = tmp104 * tmp25
tmp126 = tmp124 + tmp125
tmp127 = tmp57 - tmp126
tmp128 = triton_helpers.maximum(tmp127, tmp1)
tmp129 = libdevice.pow(tmp128, tmp3)
tmp130 = tmp62 - tmp126
tmp131 = triton_helpers.maximum(tmp130, tmp1)
tmp132 = libdevice.pow(tmp131, tmp3)
tmp133 = tmp129 + tmp132
tmp134 = tmp68 - tmp126
tmp135 = triton_helpers.maximum(tmp134, tmp1)
tmp136 = libdevice.pow(tmp135, tmp3)
tmp137 = tmp133 + tmp136
tmp138 = tmp74 - tmp126
tmp139 = triton_helpers.maximum(tmp138, tmp1)
tmp140 = libdevice.pow(tmp139, tmp3)
tmp141 = tmp137 + tmp140
tmp142 = tmp141 - tmp17
tmp143 = tmp142 * tmp20
tmp144 = tmp143 >= tmp1
tmp145 = tl.where(tmp144, tmp126, tmp124)
tmp146 = tmp125 * tmp25
tmp147 = tmp145 + tmp146
tmp148 = tmp57 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp1)
tmp150 = libdevice.pow(tmp149, tmp3)
tmp151 = tmp62 - tmp147
tmp152 = triton_helpers.maximum(tmp151, tmp1)
tmp153 = libdevice.pow(tmp152, tmp3)
tmp154 = tmp150 + tmp153
tmp155 = tmp68 - tmp147
tmp156 = triton_helpers.maximum(tmp155, tmp1)
tmp157 = libdevice.pow(tmp156, tmp3)
tmp158 = tmp154 + tmp157
tmp159 = tmp74 - tmp147
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = libdevice.pow(tmp160, tmp3)
tmp162 = tmp158 + tmp161
tmp163 = tmp162 - tmp17
tmp164 = tmp163 * tmp20
tmp165 = tmp164 >= tmp1
tmp166 = tl.where(tmp165, tmp147, tmp145)
tmp167 = tmp146 * tmp25
tmp168 = tmp166 + tmp167
tmp169 = tmp57 - tmp168
tmp170 = triton_helpers.maximum(tmp169, tmp1)
tmp171 = libdevice.pow(tmp170, tmp3)
tmp172 = tmp62 - tmp168
tmp173 = triton_helpers.maximum(tmp172, tmp1)
tmp174 = libdevice.pow(tmp173, tmp3)
tmp175 = tmp171 + tmp174
tmp176 = tmp68 - tmp168
tmp177 = triton_helpers.maximum(tmp176, tmp1)
tmp178 = libdevice.pow(tmp177, tmp3)
tmp179 = tmp175 + tmp178
tmp180 = tmp74 - tmp168
tmp181 = triton_helpers.maximum(tmp180, tmp1)
tmp182 = libdevice.pow(tmp181, tmp3)
tmp183 = tmp179 + tmp182
tmp184 = tmp183 - tmp17
tmp185 = tmp184 * tmp20
tmp186 = tmp185 >= tmp1
tmp187 = tl.where(tmp186, tmp168, tmp166)
tmp188 = tmp167 * tmp25
tmp189 = tmp187 + tmp188
tmp190 = tmp57 - tmp189
tmp191 = triton_helpers.maximum(tmp190, tmp1)
tmp192 = libdevice.pow(tmp191, tmp3)
tmp193 = tmp62 - tmp189
tmp194 = triton_helpers.maximum(tmp193, tmp1)
tmp195 = libdevice.pow(tmp194, tmp3)
tmp196 = tmp192 + tmp195
tmp197 = tmp68 - tmp189
tmp198 = triton_helpers.maximum(tmp197, tmp1)
tmp199 = libdevice.pow(tmp198, tmp3)
tmp200 = tmp196 + tmp199
tmp201 = tmp74 - tmp189
tmp202 = triton_helpers.maximum(tmp201, tmp1)
tmp203 = libdevice.pow(tmp202, tmp3)
tmp204 = tmp200 + tmp203
tmp205 = tmp204 - tmp17
tmp206 = tmp205 * tmp20
tmp207 = tmp206 >= tmp1
tmp208 = tl.where(tmp207, tmp189, tmp187)
tmp209 = tmp188 * tmp25
tmp210 = tmp208 + tmp209
tmp211 = tmp57 - tmp210
tmp212 = triton_helpers.maximum(tmp211, tmp1)
tmp213 = libdevice.pow(tmp212, tmp3)
tmp214 = tmp62 - tmp210
tmp215 = triton_helpers.maximum(tmp214, tmp1)
tmp216 = libdevice.pow(tmp215, tmp3)
tmp217 = tmp213 + tmp216
tmp218 = tmp68 - tmp210
tmp219 = triton_helpers.maximum(tmp218, tmp1)
tmp220 = libdevice.pow(tmp219, tmp3)
tmp221 = tmp217 + tmp220
tmp222 = tmp74 - tmp210
tmp223 = triton_helpers.maximum(tmp222, tmp1)
tmp224 = libdevice.pow(tmp223, tmp3)
tmp225 = tmp221 + tmp224
tmp226 = tmp225 - tmp17
tmp227 = tmp226 * tmp20
tmp228 = tmp227 >= tmp1
tmp229 = tl.where(tmp228, tmp210, tmp208)
tmp230 = tmp209 * tmp25
tmp231 = tmp229 + tmp230
tmp232 = tmp57 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp1)
tmp234 = libdevice.pow(tmp233, tmp3)
tmp235 = tmp62 - tmp231
tmp236 = triton_helpers.maximum(tmp235, tmp1)
tmp237 = libdevice.pow(tmp236, tmp3)
tmp238 = tmp234 + tmp237
tmp239 = tmp68 - tmp231
tmp240 = triton_helpers.maximum(tmp239, tmp1)
tmp241 = libdevice.pow(tmp240, tmp3)
tmp242 = tmp238 + tmp241
tmp243 = tmp74 - tmp231
tmp244 = triton_helpers.maximum(tmp243, tmp1)
tmp245 = libdevice.pow(tmp244, tmp3)
tmp246 = tmp242 + tmp245
tl.store(in_out_ptr4 + (x0), tmp231, xmask)
tl.store(out_ptr7 + (x0), tmp246, xmask)
''', device_str='cuda')
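# Note (added commentary): triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20
# above unrolls the last nine bisection updates in a single kernel. Each repeated
# block evaluates f(tau_m) = sum(clamp(X/2 - tau_m, 0)^2) - 1, compares its sign
# against f_lo (tmp20), and selects the new tau_lo with tl.where; the kernel
# writes tau_m_49 (tmp231, via in_out_ptr4) and the unnormalized sum_52 (tmp246).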
# kernel path: runs/run_shard_4/inductor_cache/pm/cpmji3aw76p4a3ywro7icjwdbgchdizzkcjfhno53435xsk6mwfd.py
# Topologically Sorted Source Nodes: [sub, X, dm_45, dm_46, dm_47, dm_48, tau_m_47, tau_lo_48, dm_49, tau_m_48, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, p_m_50], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow]
# Source node to ATen node mapping:
# X => mul
# clamp_50 => clamp_min_50
# dm_45 => div_44
# dm_46 => div_45
# dm_47 => div_46
# dm_48 => div_47
# dm_49 => div_48
# dm_50 => div_49
# p_m_49 => pow_53
# p_m_50 => div_50
# sub => full_default
# sub_156 => sub_156
# tau_lo_48 => where_47
# tau_lo_49 => where_48
# tau_m_47 => add_47
# tau_m_48 => add_48
# tau_m_49 => add_49
# truediv_50 => full_default_53
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=52] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %div_44 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_43, 2), kwargs = {})
# %div_45 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_44, 2), kwargs = {})
# %div_46 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_45, 2), kwargs = {})
# %div_47 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_46, 2), kwargs = {})
# %add_47 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_46, %div_47), kwargs = {})
# %where_47 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_47, %add_47, %where_46), kwargs = {})
# %div_48 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_47, 2), kwargs = {})
# %add_48 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_47, %div_48), kwargs = {})
# %where_48 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%unsqueeze_48, %add_48, %where_47), kwargs = {})
# %div_49 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%div_48, 2), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_48, %div_49), kwargs = {})
# %sub_156 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %add_49), kwargs = {})
# %clamp_min_50 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_156, 0), kwargs = {})
# %full_default_53 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 1], 2.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %pow_53 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Tensor](args = (%clamp_min_50, %full_default_53), kwargs = {})
# %div_50 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_53, %unsqueeze_50), kwargs = {})
triton_poi_fused_add_clamp_div_mul_pow_sub_where_21 = async_compile.triton('triton_poi_fused_add_clamp_div_mul_pow_sub_where_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_div_mul_pow_sub_where_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 2.0
tmp8 = libdevice.pow(tmp6, tmp7)
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
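# Note (added commentary): triton_poi_fused_add_clamp_div_mul_pow_sub_where_21
# above emits the final result p_m_50 = clamp(X/2 - tau_m_49, 0)^2 / sum_52,
# i.e. the ensure_sum_one renormalization from the Python reference below.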
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf52 = reinterpret_tensor(buf51, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf51 # reuse
buf53 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, X, max_1, pow_2, tau_hi, pow_1, tau_lo, dm, dm_1, tau_m, sub_9, clamp_1, truediv_1, p_m, sum_2, sub_5, clamp, truediv, pow_3, sum_1, f_lo, tau_lo_1, dm_2, tau_m_1, sub_12, clamp_2, truediv_2, p_m_1, sum_3, f_m_1, mul_2, tau_lo_2, dm_3, tau_m_2, sub_15, clamp_3, truediv_3, p_m_2, sum_4, f_m_2, mul_3, tau_lo_3, dm_4, tau_m_3, sub_18, clamp_4, truediv_4, p_m_3, sum_5, f_m_3, mul_4, tau_lo_4, dm_5, tau_m_4, sub_21, clamp_5, truediv_5, p_m_4, sum_6, f_m_4, mul_5, tau_lo_5, dm_6, tau_m_5, sub_24, clamp_6, truediv_6, p_m_5, sum_7, f_m_5, mul_6, tau_lo_6, dm_7, tau_m_6, sub_27, clamp_7, truediv_7, p_m_6, sum_8, f_m_6, mul_7, tau_lo_7, dm_8, tau_m_7, sub_30, clamp_8, truediv_8, p_m_7, sum_9, f_m_7, mul_8, tau_lo_8, dm_9, tau_m_8, sub_33, clamp_9, truediv_9, p_m_8, sum_10, f_m_8, mul_9, tau_lo_9, dm_10, tau_m_9, sub_36, clamp_10, truediv_10, p_m_9, sum_11, f_m_9, mul_10, tau_lo_10, dm_11, tau_m_10, sub_39, clamp_11, truediv_11, p_m_10, sum_12, f_m_10, mul_11, tau_lo_11, dm_12, tau_m_11, sub_42, clamp_12, truediv_12, p_m_11, sum_13, f_m_11, mul_12, tau_lo_12, dm_13, tau_m_12, sub_45, clamp_13, truediv_13, p_m_12, sum_14, f_m_12, mul_13, tau_lo_13, dm_14, tau_m_13, sub_48, clamp_14, truediv_14, p_m_13, sum_15, f_m_13, mul_14, tau_lo_14, dm_15, tau_m_14, sub_51, clamp_15, truediv_15, p_m_14, sum_16, tau_lo_15, dm_16, tau_m_15, sub_54, clamp_16, truediv_16, p_m_15, sum_17, f_m_15, tau_lo_16, dm_17, tau_m_16, sub_57, clamp_17, truediv_17, p_m_16, sum_18, tau_lo_17, dm_18, tau_m_17, sub_60, clamp_18, truediv_18, p_m_17, sum_19, tau_lo_18, dm_19, tau_m_18, sub_63, clamp_19, truediv_19, p_m_18, sum_20, tau_lo_19, dm_20, tau_m_19, sub_66, clamp_20, truediv_20, p_m_19, sum_21, tau_lo_20, dm_21, tau_m_20, sub_69, clamp_21, truediv_21, p_m_20, sum_22, tau_lo_21, dm_22, tau_m_21, sub_72, clamp_22, truediv_22, p_m_21, sum_23, tau_lo_22, dm_23, tau_m_22, sub_75, clamp_23, truediv_23, p_m_22, sum_24, tau_lo_23, dm_24, tau_m_23, sub_78, clamp_24, truediv_24, p_m_23, sum_25, tau_lo_24, dm_25, tau_m_24, sub_81, clamp_25, truediv_25, p_m_24, sum_26, tau_lo_25, dm_26, tau_m_25, sub_84, clamp_26, truediv_26, p_m_25, sum_27], Original ATen: [aten.sub, aten.mul, aten.max, aten.pow, aten.div, aten.add, aten.clamp, aten.sum, aten.where]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0.run(buf52, arg0_1, buf1, buf35, buf53, 64, grid=grid(64), stream=stream0)
buf54 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, sub_87, clamp_27, truediv_27, p_m_26], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow]
triton_poi_fused_add_clamp_div_mul_pow_sub_where_1.run(arg0_1, buf53, buf1, buf52, buf35, buf54, 256, grid=grid(256), stream=stream0)
buf55 = buf52; del buf52 # reuse
buf56 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, tau_m_25, tau_lo_26, dm_27, tau_m_26, tau_lo_27, dm_28, tau_m_27, sub_90, clamp_28, truediv_28, p_m_27, sum_29], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2.run(buf55, buf54, buf1, buf53, buf35, arg0_1, buf56, 64, grid=grid(64), stream=stream0)
buf57 = buf54; del buf54 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, sub_93], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where]
triton_poi_fused_add_div_mul_sub_where_3.run(arg0_1, buf56, buf1, buf55, buf35, buf57, 256, grid=grid(256), stream=stream0)
buf58 = buf55; del buf55 # reuse
buf59 = buf53; del buf53 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, tau_m_27, tau_lo_28, dm_29, tau_m_28, tau_lo_29, dm_30, tau_m_29, sub_96, clamp_30, truediv_30, p_m_29, sum_31], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4.run(buf58, buf57, buf1, buf56, buf35, arg0_1, buf59, 64, grid=grid(64), stream=stream0)
buf60 = buf57; del buf57 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, sub_99], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where]
triton_poi_fused_add_div_mul_sub_where_5.run(arg0_1, buf59, buf1, buf58, buf35, buf60, 256, grid=grid(256), stream=stream0)
buf61 = buf58; del buf58 # reuse
buf66 = buf56; del buf56 # reuse
buf67 = reinterpret_tensor(buf66, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf66 # reuse
# Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, tau_m_29, tau_lo_30, dm_31, tau_m_30, tau_lo_31, dm_32, tau_m_31, sub_102, clamp_32, truediv_32, p_m_31, sum_33, tau_lo_32, dm_33, tau_m_32, sub_105, clamp_33, truediv_33, p_m_32, sum_34, f_m_32, mul_33, tau_lo_33, dm_34, tau_m_33, sub_108, clamp_34, truediv_34, p_m_33, sum_35, tau_lo_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6.run(buf61, buf67, buf60, buf1, buf59, buf35, arg0_1, 64, grid=grid(64), stream=stream0)
buf68 = buf60; del buf60 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, sub_111, clamp_35, truediv_35, p_m_34], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow]
triton_poi_fused_add_clamp_div_mul_pow_sub_7.run(arg0_1, buf67, buf35, buf68, 256, grid=grid(256), stream=stream0)
buf69 = buf67; del buf67 # reuse
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, tau_m_34, tau_lo_35], Original ATen: [aten.div, aten.add, aten.where]
triton_poi_fused_add_div_where_8.run(buf69, buf68, buf1, buf35, 64, grid=grid(64), stream=stream0)
buf70 = buf68; del buf68 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, sub_114, clamp_36, truediv_36, p_m_35], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp, aten.pow]
triton_poi_fused_add_clamp_div_mul_pow_sub_9.run(arg0_1, buf69, buf35, buf70, 256, grid=grid(256), stream=stream0)
buf71 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, tau_m_35, tau_lo_36], Original ATen: [aten.div, aten.add, aten.where]
triton_poi_fused_add_div_where_10.run(buf71, buf70, buf1, buf35, 64, grid=grid(64), stream=stream0)
buf72 = buf70; del buf70 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, sub_117, clamp_37], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp]
triton_poi_fused_add_clamp_div_mul_sub_11.run(arg0_1, buf71, buf35, buf72, 256, grid=grid(256), stream=stream0)
buf73 = buf71; del buf71 # reuse
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, tau_m_36, tau_lo_37], Original ATen: [aten.div, aten.add, aten.where]
triton_poi_fused_add_div_where_12.run(buf73, buf72, buf1, buf35, 64, grid=grid(64), stream=stream0)
buf74 = buf72; del buf72 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, sub_120, clamp_38], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.clamp]
triton_poi_fused_add_clamp_div_mul_sub_13.run(arg0_1, buf73, buf35, buf74, 256, grid=grid(256), stream=stream0)
buf75 = buf73; del buf73 # reuse
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, tau_m_37, tau_lo_38], Original ATen: [aten.div, aten.add, aten.where]
triton_poi_fused_add_div_where_14.run(buf75, buf74, buf1, buf35, 64, grid=grid(64), stream=stream0)
buf76 = buf74; del buf74 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, sub_123], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
triton_poi_fused_add_div_mul_sub_15.run(arg0_1, buf75, buf35, buf76, 256, grid=grid(256), stream=stream0)
buf77 = buf75; del buf75 # reuse
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, tau_m_38, tau_lo_39], Original ATen: [aten.div, aten.add, aten.where]
triton_poi_fused_add_div_where_16.run(buf77, buf76, buf1, buf35, 64, grid=grid(64), stream=stream0)
buf78 = buf76; del buf76 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, sub_126], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
triton_poi_fused_add_div_mul_sub_17.run(arg0_1, buf77, buf35, buf78, 256, grid=grid(256), stream=stream0)
buf79 = buf77; del buf77 # reuse
# Topologically Sorted Source Nodes: [dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, tau_m_39, tau_lo_40], Original ATen: [aten.div, aten.add, aten.where]
triton_poi_fused_add_div_where_18.run(buf79, buf78, buf1, buf35, 64, grid=grid(64), stream=stream0)
buf80 = buf78; del buf78 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, sub_129], Original ATen: [aten.sub, aten.mul, aten.div, aten.add]
triton_poi_fused_add_div_mul_sub_19.run(arg0_1, buf79, buf35, buf80, 256, grid=grid(256), stream=stream0)
buf81 = buf79; del buf79 # reuse
buf95 = reinterpret_tensor(buf61, (4, 4, 4), (16, 4, 1), 0); del buf61 # reuse
buf96 = reinterpret_tensor(buf95, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf95 # reuse
buf97 = buf59; del buf59 # reuse
# Topologically Sorted Source Nodes: [sub, X, f_lo, dm_16, dm_17, dm_18, dm_19, dm_20, dm_21, dm_22, dm_23, dm_24, dm_25, dm_26, dm_27, dm_28, dm_29, dm_30, dm_31, dm_32, dm_33, dm_34, dm_35, dm_36, dm_37, dm_38, dm_39, dm_40, dm_41, tau_m_40, tau_lo_41, dm_42, tau_m_41, sub_132, clamp_42, truediv_42, p_m_41, sum_43, f_m_41, mul_42, tau_lo_42, dm_43, tau_m_42, sub_135, clamp_43, truediv_43, p_m_42, sum_44, f_m_42, mul_43, tau_lo_43, dm_44, tau_m_43, sub_138, clamp_44, truediv_44, p_m_43, sum_45, f_m_43, tau_lo_44, dm_45, tau_m_44, sub_141, clamp_45, truediv_45, p_m_44, sum_46, tau_lo_45, dm_46, tau_m_45, sub_144, clamp_46, truediv_46, p_m_45, sum_47, tau_lo_46, dm_47, tau_m_46, sub_147, clamp_47, truediv_47, p_m_46, sum_48, tau_lo_47, dm_48, tau_m_47, sub_150, clamp_48, truediv_48, p_m_47, sum_49, tau_lo_48, dm_49, tau_m_48, sub_153, clamp_49, truediv_49, p_m_48, sum_50, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, sum_52], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow, aten.sum]
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20.run(buf81, buf96, buf80, buf1, buf35, arg0_1, buf97, 64, grid=grid(64), stream=stream0)
del buf1
del buf35
del buf81
buf98 = buf80; del buf80 # reuse
# Topologically Sorted Source Nodes: [sub, X, dm_45, dm_46, dm_47, dm_48, tau_m_47, tau_lo_48, dm_49, tau_m_48, tau_lo_49, dm_50, tau_m_49, sub_156, clamp_50, truediv_50, p_m_49, p_m_50], Original ATen: [aten.sub, aten.mul, aten.div, aten.add, aten.where, aten.clamp, aten.pow]
triton_poi_fused_add_clamp_div_mul_pow_sub_where_21.run(arg0_1, buf96, buf97, buf98, 256, grid=grid(256), stream=stream0)
del arg0_1
del buf96
del buf97
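        # buf98 holds p_m_50, the renormalized alpha-entmax probabilities, i.e.
        # the tensor EntmaxBisectFunction.forward returns with ensure_sum_one=True.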
return (buf98, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
"""alpha-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1.
where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
using a bisection (root finding, binary search) algorithm.
This function is differentiable with respect to both X and alpha.
Parameters
----------
X : torch.Tensor
The input tensor.
alpha : float or torch.Tensor
        Tensor of alpha parameters (> 1) to use. If a scalar or Python
        float, the same value is used for all rows; otherwise, it must have
        shape (or be expandable to)
        alpha.shape[j] == (X.shape[j] if j != dim else 1).
A value of alpha=2 corresponds to sparsemax, and alpha=1 corresponds to
softmax (but computing it this way is likely unstable).
dim : int
The dimension along which to apply alpha-entmax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If False, the result may
        sum to a value close to, but not exactly, 1, which can cause
        problems downstream.
Returns
-------
    P : torch.Tensor, same shape as X
        The projection result, such that P.sum(dim=dim) == 1 for every
        slice along dim.
"""
return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)
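# Hedged usage sketch (added; not part of the original module). With the
# default alpha=1.5 the result behaves like a sparse softmax: every slice
# along `dim` is nonnegative, sums to 1, and may contain exact zeros. The
# shapes below are arbitrary illustration values.
#
#     X = torch.randn(2, 5)
#     P = entmax_bisect(X, alpha=1.5, dim=-1)
#     assert torch.allclose(P.sum(dim=-1), torch.ones(2), atol=1e-6)
#     assert (P >= 0).all()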
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
    def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
        # Initial bracket [tau_lo, tau_hi] is guaranteed to contain the root
        # of f(tau) = sum(p(X - tau)) - 1, which is decreasing in tau.
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
        dm = tau_hi - tau_lo
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(dim) - 1
            # While f(tau_m) is on the same side as f_lo, the root lies above
            # tau_m, so the lower bound is raised; otherwise it is kept.
            mask = (f_m * f_lo >= 0).unsqueeze(dim)
            tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
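# Hedged note (added commentary): the backward above applies the closed-form
# Jacobian-vector product of entmax,
#     dX = gppr * (dY - q),  with  q = sum(dY * gppr) / sum(gppr),
# where gppr = Y ** (2 - alpha) on the support of Y. A double-precision spot
# check (an assumed test, not from the original file):
#
#     X = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
#     torch.autograd.gradcheck(
#         lambda t: entmax_bisect(t, alpha=1.5, dim=-1), (X,), eps=1e-6)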
class EntmaxBisect(nn.Module):
def __init__(self, alpha=1.5, dim=-1, n_iter=50):
"""alpha-entmax: normalizing sparse map (a la softmax) via bisection.
Solves the optimization problem:
max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1.
where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
using a bisection (root finding, binary search) algorithm.
Parameters
----------
alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If a scalar or Python
            float, the same value is used for all rows; otherwise, it must
            have shape (or be expandable to)
            alpha.shape[j] == (X.shape[j] if j != dim else 1).
A value of alpha=2 corresponds to sparsemax; alpha=1 corresponds
to softmax (but computing it this way is likely unstable).
dim : int
The dimension along which to apply alpha-entmax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
"""
        super().__init__()
        self.dim = dim
        self.n_iter = n_iter
        self.alpha = alpha
    def forward(self, X):
        return entmax_bisect(X, alpha=self.alpha, dim=self.dim,
                             n_iter=self.n_iter)
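# Hedged usage sketch (added): the module acts as a drop-in replacement for
# nn.Softmax over the chosen dimension, e.g. for attention weights:
#
#     attn = EntmaxBisect(alpha=1.5, dim=-1)
#     weights = attn(torch.randn(4, 4, 4, 4))  # same shape; sums to 1 on dim=-1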
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0(in_out_ptr12,
in_ptr0, out_ptr0, out_ptr19, out_ptr27, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = triton_helpers.maximum(tmp2, tmp4)
tmp7 = tmp6 * tmp1
tmp8 = triton_helpers.maximum(tmp5, tmp7)
tmp10 = tmp9 * tmp1
tmp11 = triton_helpers.maximum(tmp8, tmp10)
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp14 = tmp11 - tmp1
tmp15 = tmp14 - tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp13 + tmp16
tmp18 = tmp2 - tmp17
tmp19 = 0.0
tmp20 = triton_helpers.maximum(tmp18, tmp19)
tmp21 = 2.0
tmp22 = libdevice.pow(tmp20, tmp21)
tmp23 = tmp4 - tmp17
tmp24 = triton_helpers.maximum(tmp23, tmp19)
tmp25 = libdevice.pow(tmp24, tmp21)
tmp26 = tmp22 + tmp25
tmp27 = tmp7 - tmp17
tmp28 = triton_helpers.maximum(tmp27, tmp19)
tmp29 = libdevice.pow(tmp28, tmp21)
tmp30 = tmp26 + tmp29
tmp31 = tmp10 - tmp17
tmp32 = triton_helpers.maximum(tmp31, tmp19)
tmp33 = libdevice.pow(tmp32, tmp21)
tmp34 = tmp30 + tmp33
tmp35 = tmp2 - tmp13
tmp36 = triton_helpers.maximum(tmp35, tmp19)
tmp37 = libdevice.pow(tmp36, tmp21)
tmp38 = tmp4 - tmp13
tmp39 = triton_helpers.maximum(tmp38, tmp19)
tmp40 = libdevice.pow(tmp39, tmp21)
tmp41 = tmp37 + tmp40
tmp42 = tmp7 - tmp13
tmp43 = triton_helpers.maximum(tmp42, tmp19)
tmp44 = libdevice.pow(tmp43, tmp21)
tmp45 = tmp41 + tmp44
tmp46 = tmp10 - tmp13
tmp47 = triton_helpers.maximum(tmp46, tmp19)
tmp48 = libdevice.pow(tmp47, tmp21)
tmp49 = tmp45 + tmp48
tmp50 = tmp34 - tmp12
tmp51 = tmp49 - tmp12
tmp52 = tmp50 * tmp51
tmp53 = tmp52 >= tmp19
tmp54 = tl.where(tmp53, tmp17, tmp13)
tmp55 = tmp16 * tmp1
tmp56 = tmp54 + tmp55
tmp57 = tmp2 - tmp56
tmp58 = triton_helpers.maximum(tmp57, tmp19)
tmp59 = libdevice.pow(tmp58, tmp21)
tmp60 = tmp4 - tmp56
tmp61 = triton_helpers.maximum(tmp60, tmp19)
tmp62 = libdevice.pow(tmp61, tmp21)
tmp63 = tmp59 + tmp62
tmp64 = tmp7 - tmp56
tmp65 = triton_helpers.maximum(tmp64, tmp19)
tmp66 = libdevice.pow(tmp65, tmp21)
tmp67 = tmp63 + tmp66
tmp68 = tmp10 - tmp56
tmp69 = triton_helpers.maximum(tmp68, tmp19)
tmp70 = libdevice.pow(tmp69, tmp21)
tmp71 = tmp67 + tmp70
tmp72 = tmp71 - tmp12
tmp73 = tmp72 * tmp51
tmp74 = tmp73 >= tmp19
tmp75 = tl.where(tmp74, tmp56, tmp54)
tmp76 = tmp55 * tmp1
tmp77 = tmp75 + tmp76
tmp78 = tmp2 - tmp77
tmp79 = triton_helpers.maximum(tmp78, tmp19)
tmp80 = libdevice.pow(tmp79, tmp21)
tmp81 = tmp4 - tmp77
tmp82 = triton_helpers.maximum(tmp81, tmp19)
tmp83 = libdevice.pow(tmp82, tmp21)
tmp84 = tmp80 + tmp83
tmp85 = tmp7 - tmp77
tmp86 = triton_helpers.maximum(tmp85, tmp19)
tmp87 = libdevice.pow(tmp86, tmp21)
tmp88 = tmp84 + tmp87
tmp89 = tmp10 - tmp77
tmp90 = triton_helpers.maximum(tmp89, tmp19)
tmp91 = libdevice.pow(tmp90, tmp21)
tmp92 = tmp88 + tmp91
tmp93 = tmp92 - tmp12
tmp94 = tmp93 * tmp51
tmp95 = tmp94 >= tmp19
tmp96 = tl.where(tmp95, tmp77, tmp75)
tmp97 = tmp76 * tmp1
tmp98 = tmp96 + tmp97
tmp99 = tmp2 - tmp98
tmp100 = triton_helpers.maximum(tmp99, tmp19)
tmp101 = libdevice.pow(tmp100, tmp21)
tmp102 = tmp4 - tmp98
tmp103 = triton_helpers.maximum(tmp102, tmp19)
tmp104 = libdevice.pow(tmp103, tmp21)
tmp105 = tmp101 + tmp104
tmp106 = tmp7 - tmp98
tmp107 = triton_helpers.maximum(tmp106, tmp19)
tmp108 = libdevice.pow(tmp107, tmp21)
tmp109 = tmp105 + tmp108
tmp110 = tmp10 - tmp98
tmp111 = triton_helpers.maximum(tmp110, tmp19)
tmp112 = libdevice.pow(tmp111, tmp21)
tmp113 = tmp109 + tmp112
tmp114 = tmp113 - tmp12
tmp115 = tmp114 * tmp51
tmp116 = tmp115 >= tmp19
tmp117 = tl.where(tmp116, tmp98, tmp96)
tmp118 = tmp97 * tmp1
tmp119 = tmp117 + tmp118
tmp120 = tmp2 - tmp119
tmp121 = triton_helpers.maximum(tmp120, tmp19)
tmp122 = libdevice.pow(tmp121, tmp21)
tmp123 = tmp4 - tmp119
tmp124 = triton_helpers.maximum(tmp123, tmp19)
tmp125 = libdevice.pow(tmp124, tmp21)
tmp126 = tmp122 + tmp125
tmp127 = tmp7 - tmp119
tmp128 = triton_helpers.maximum(tmp127, tmp19)
tmp129 = libdevice.pow(tmp128, tmp21)
tmp130 = tmp126 + tmp129
tmp131 = tmp10 - tmp119
tmp132 = triton_helpers.maximum(tmp131, tmp19)
tmp133 = libdevice.pow(tmp132, tmp21)
tmp134 = tmp130 + tmp133
tmp135 = tmp134 - tmp12
tmp136 = tmp135 * tmp51
tmp137 = tmp136 >= tmp19
tmp138 = tl.where(tmp137, tmp119, tmp117)
tmp139 = tmp118 * tmp1
tmp140 = tmp138 + tmp139
tmp141 = tmp2 - tmp140
tmp142 = triton_helpers.maximum(tmp141, tmp19)
tmp143 = libdevice.pow(tmp142, tmp21)
tmp144 = tmp4 - tmp140
tmp145 = triton_helpers.maximum(tmp144, tmp19)
tmp146 = libdevice.pow(tmp145, tmp21)
tmp147 = tmp143 + tmp146
tmp148 = tmp7 - tmp140
tmp149 = triton_helpers.maximum(tmp148, tmp19)
tmp150 = libdevice.pow(tmp149, tmp21)
tmp151 = tmp147 + tmp150
tmp152 = tmp10 - tmp140
tmp153 = triton_helpers.maximum(tmp152, tmp19)
tmp154 = libdevice.pow(tmp153, tmp21)
tmp155 = tmp151 + tmp154
tmp156 = tmp155 - tmp12
tmp157 = tmp156 * tmp51
tmp158 = tmp157 >= tmp19
tmp159 = tl.where(tmp158, tmp140, tmp138)
tmp160 = tmp139 * tmp1
tmp161 = tmp159 + tmp160
tmp162 = tmp2 - tmp161
tmp163 = triton_helpers.maximum(tmp162, tmp19)
tmp164 = libdevice.pow(tmp163, tmp21)
tmp165 = tmp4 - tmp161
tmp166 = triton_helpers.maximum(tmp165, tmp19)
tmp167 = libdevice.pow(tmp166, tmp21)
tmp168 = tmp164 + tmp167
tmp169 = tmp7 - tmp161
tmp170 = triton_helpers.maximum(tmp169, tmp19)
tmp171 = libdevice.pow(tmp170, tmp21)
tmp172 = tmp168 + tmp171
tmp173 = tmp10 - tmp161
tmp174 = triton_helpers.maximum(tmp173, tmp19)
tmp175 = libdevice.pow(tmp174, tmp21)
tmp176 = tmp172 + tmp175
tmp177 = tmp176 - tmp12
tmp178 = tmp177 * tmp51
tmp179 = tmp178 >= tmp19
tmp180 = tl.where(tmp179, tmp161, tmp159)
tmp181 = tmp160 * tmp1
tmp182 = tmp180 + tmp181
tmp183 = tmp2 - tmp182
tmp184 = triton_helpers.maximum(tmp183, tmp19)
tmp185 = libdevice.pow(tmp184, tmp21)
tmp186 = tmp4 - tmp182
tmp187 = triton_helpers.maximum(tmp186, tmp19)
tmp188 = libdevice.pow(tmp187, tmp21)
tmp189 = tmp185 + tmp188
tmp190 = tmp7 - tmp182
tmp191 = triton_helpers.maximum(tmp190, tmp19)
tmp192 = libdevice.pow(tmp191, tmp21)
tmp193 = tmp189 + tmp192
tmp194 = tmp10 - tmp182
tmp195 = triton_helpers.maximum(tmp194, tmp19)
tmp196 = libdevice.pow(tmp195, tmp21)
tmp197 = tmp193 + tmp196
tmp198 = tmp197 - tmp12
tmp199 = tmp198 * tmp51
tmp200 = tmp199 >= tmp19
tmp201 = tl.where(tmp200, tmp182, tmp180)
tmp202 = tmp181 * tmp1
tmp203 = tmp201 + tmp202
tmp204 = tmp2 - tmp203
tmp205 = triton_helpers.maximum(tmp204, tmp19)
tmp206 = libdevice.pow(tmp205, tmp21)
tmp207 = tmp4 - tmp203
tmp208 = triton_helpers.maximum(tmp207, tmp19)
tmp209 = libdevice.pow(tmp208, tmp21)
tmp210 = tmp206 + tmp209
tmp211 = tmp7 - tmp203
tmp212 = triton_helpers.maximum(tmp211, tmp19)
tmp213 = libdevice.pow(tmp212, tmp21)
tmp214 = tmp210 + tmp213
tmp215 = tmp10 - tmp203
tmp216 = triton_helpers.maximum(tmp215, tmp19)
tmp217 = libdevice.pow(tmp216, tmp21)
tmp218 = tmp214 + tmp217
tmp219 = tmp218 - tmp12
tmp220 = tmp219 * tmp51
tmp221 = tmp220 >= tmp19
tmp222 = tl.where(tmp221, tmp203, tmp201)
tmp223 = tmp202 * tmp1
tmp224 = tmp222 + tmp223
tmp225 = tmp2 - tmp224
tmp226 = triton_helpers.maximum(tmp225, tmp19)
tmp227 = libdevice.pow(tmp226, tmp21)
tmp228 = tmp4 - tmp224
tmp229 = triton_helpers.maximum(tmp228, tmp19)
tmp230 = libdevice.pow(tmp229, tmp21)
tmp231 = tmp227 + tmp230
tmp232 = tmp7 - tmp224
tmp233 = triton_helpers.maximum(tmp232, tmp19)
tmp234 = libdevice.pow(tmp233, tmp21)
tmp235 = tmp231 + tmp234
tmp236 = tmp10 - tmp224
tmp237 = triton_helpers.maximum(tmp236, tmp19)
tmp238 = libdevice.pow(tmp237, tmp21)
tmp239 = tmp235 + tmp238
tmp240 = tmp239 - tmp12
tmp241 = tmp240 * tmp51
tmp242 = tmp241 >= tmp19
tmp243 = tl.where(tmp242, tmp224, tmp222)
tmp244 = tmp223 * tmp1
tmp245 = tmp243 + tmp244
tmp246 = tmp2 - tmp245
tmp247 = triton_helpers.maximum(tmp246, tmp19)
tmp248 = libdevice.pow(tmp247, tmp21)
tmp249 = tmp4 - tmp245
tmp250 = triton_helpers.maximum(tmp249, tmp19)
tmp251 = libdevice.pow(tmp250, tmp21)
tmp252 = tmp248 + tmp251
tmp253 = tmp7 - tmp245
tmp254 = triton_helpers.maximum(tmp253, tmp19)
tmp255 = libdevice.pow(tmp254, tmp21)
tmp256 = tmp252 + tmp255
tmp257 = tmp10 - tmp245
tmp258 = triton_helpers.maximum(tmp257, tmp19)
tmp259 = libdevice.pow(tmp258, tmp21)
tmp260 = tmp256 + tmp259
tmp261 = tmp260 - tmp12
tmp262 = tmp261 * tmp51
tmp263 = tmp262 >= tmp19
tmp264 = tl.where(tmp263, tmp245, tmp243)
tmp265 = tmp244 * tmp1
tmp266 = tmp264 + tmp265
tmp267 = tmp2 - tmp266
tmp268 = triton_helpers.maximum(tmp267, tmp19)
tmp269 = libdevice.pow(tmp268, tmp21)
tmp270 = tmp4 - tmp266
tmp271 = triton_helpers.maximum(tmp270, tmp19)
tmp272 = libdevice.pow(tmp271, tmp21)
tmp273 = tmp269 + tmp272
tmp274 = tmp7 - tmp266
tmp275 = triton_helpers.maximum(tmp274, tmp19)
tmp276 = libdevice.pow(tmp275, tmp21)
tmp277 = tmp273 + tmp276
tmp278 = tmp10 - tmp266
tmp279 = triton_helpers.maximum(tmp278, tmp19)
tmp280 = libdevice.pow(tmp279, tmp21)
tmp281 = tmp277 + tmp280
tmp282 = tmp281 - tmp12
tmp283 = tmp282 * tmp51
tmp284 = tmp283 >= tmp19
tmp285 = tl.where(tmp284, tmp266, tmp264)
tmp286 = tmp265 * tmp1
tmp287 = tmp285 + tmp286
tmp288 = tmp2 - tmp287
tmp289 = triton_helpers.maximum(tmp288, tmp19)
tmp290 = libdevice.pow(tmp289, tmp21)
tmp291 = tmp4 - tmp287
tmp292 = triton_helpers.maximum(tmp291, tmp19)
tmp293 = libdevice.pow(tmp292, tmp21)
tmp294 = tmp290 + tmp293
tmp295 = tmp7 - tmp287
tmp296 = triton_helpers.maximum(tmp295, tmp19)
tmp297 = libdevice.pow(tmp296, tmp21)
tmp298 = tmp294 + tmp297
tmp299 = tmp10 - tmp287
tmp300 = triton_helpers.maximum(tmp299, tmp19)
tmp301 = libdevice.pow(tmp300, tmp21)
tmp302 = tmp298 + tmp301
tmp303 = tmp302 - tmp12
tmp304 = tmp303 * tmp51
tmp305 = tmp304 >= tmp19
tmp306 = tl.where(tmp305, tmp287, tmp285)
tmp307 = tmp286 * tmp1
tmp308 = tmp307 * tmp1
tmp309 = tmp306 + tmp307
tmp310 = tmp2 - tmp309
tmp311 = triton_helpers.maximum(tmp310, tmp19)
tmp312 = libdevice.pow(tmp311, tmp21)
tmp313 = tmp4 - tmp309
tmp314 = triton_helpers.maximum(tmp313, tmp19)
tmp315 = libdevice.pow(tmp314, tmp21)
tmp316 = tmp312 + tmp315
tmp317 = tmp7 - tmp309
tmp318 = triton_helpers.maximum(tmp317, tmp19)
tmp319 = libdevice.pow(tmp318, tmp21)
tmp320 = tmp316 + tmp319
tmp321 = tmp10 - tmp309
tmp322 = triton_helpers.maximum(tmp321, tmp19)
tmp323 = libdevice.pow(tmp322, tmp21)
tmp324 = tmp320 + tmp323
tmp325 = tmp324 - tmp12
tmp326 = tmp325 * tmp51
tmp327 = tmp326 >= tmp19
tmp328 = tl.where(tmp327, tmp309, tmp306)
tmp329 = tmp328 + tmp308
tmp330 = tmp2 - tmp329
tmp331 = triton_helpers.maximum(tmp330, tmp19)
tmp332 = libdevice.pow(tmp331, tmp21)
tmp333 = tmp4 - tmp329
tmp334 = triton_helpers.maximum(tmp333, tmp19)
tmp335 = libdevice.pow(tmp334, tmp21)
tmp336 = tmp332 + tmp335
tmp337 = tmp7 - tmp329
tmp338 = triton_helpers.maximum(tmp337, tmp19)
tmp339 = libdevice.pow(tmp338, tmp21)
tmp340 = tmp336 + tmp339
tmp341 = tmp10 - tmp329
tmp342 = triton_helpers.maximum(tmp341, tmp19)
tmp343 = libdevice.pow(tmp342, tmp21)
tmp344 = tmp340 + tmp343
tmp345 = tmp344 - tmp12
tmp346 = tmp345 * tmp51
tmp347 = tmp346 >= tmp19
tmp348 = tl.where(tmp347, tmp329, tmp328)
tmp349 = tmp308 * tmp1
tmp350 = tmp348 + tmp349
tmp351 = tmp2 - tmp350
tmp352 = triton_helpers.maximum(tmp351, tmp19)
tmp353 = libdevice.pow(tmp352, tmp21)
tmp354 = tmp4 - tmp350
tmp355 = triton_helpers.maximum(tmp354, tmp19)
tmp356 = libdevice.pow(tmp355, tmp21)
tmp357 = tmp353 + tmp356
tmp358 = tmp7 - tmp350
tmp359 = triton_helpers.maximum(tmp358, tmp19)
tmp360 = libdevice.pow(tmp359, tmp21)
tmp361 = tmp357 + tmp360
tmp362 = tmp10 - tmp350
tmp363 = triton_helpers.maximum(tmp362, tmp19)
tmp364 = libdevice.pow(tmp363, tmp21)
tmp365 = tmp361 + tmp364
tmp366 = tmp365 - tmp12
tmp367 = tmp366 * tmp51
tmp368 = tmp367 >= tmp19
tmp369 = tl.where(tmp368, tmp350, tmp348)
tmp370 = tmp349 * tmp1
tmp371 = tmp369 + tmp370
tmp372 = tmp2 - tmp371
tmp373 = triton_helpers.maximum(tmp372, tmp19)
tmp374 = libdevice.pow(tmp373, tmp21)
tmp375 = tmp4 - tmp371
tmp376 = triton_helpers.maximum(tmp375, tmp19)
tmp377 = libdevice.pow(tmp376, tmp21)
tmp378 = tmp374 + tmp377
tmp379 = tmp7 - tmp371
tmp380 = triton_helpers.maximum(tmp379, tmp19)
tmp381 = libdevice.pow(tmp380, tmp21)
tmp382 = tmp378 + tmp381
tmp383 = tmp10 - tmp371
tmp384 = triton_helpers.maximum(tmp383, tmp19)
tmp385 = libdevice.pow(tmp384, tmp21)
tmp386 = tmp382 + tmp385
tmp387 = tmp386 - tmp12
tmp388 = tmp387 * tmp51
tmp389 = tmp388 >= tmp19
tmp390 = tl.where(tmp389, tmp371, tmp369)
tmp391 = tmp370 * tmp1
tmp392 = tmp390 + tmp391
tmp393 = tmp2 - tmp392
tmp394 = triton_helpers.maximum(tmp393, tmp19)
tmp395 = libdevice.pow(tmp394, tmp21)
tmp396 = tmp4 - tmp392
tmp397 = triton_helpers.maximum(tmp396, tmp19)
tmp398 = libdevice.pow(tmp397, tmp21)
tmp399 = tmp395 + tmp398
tmp400 = tmp7 - tmp392
tmp401 = triton_helpers.maximum(tmp400, tmp19)
tmp402 = libdevice.pow(tmp401, tmp21)
tmp403 = tmp399 + tmp402
tmp404 = tmp10 - tmp392
tmp405 = triton_helpers.maximum(tmp404, tmp19)
tmp406 = libdevice.pow(tmp405, tmp21)
tmp407 = tmp403 + tmp406
tmp408 = tmp407 - tmp12
tmp409 = tmp408 * tmp51
tmp410 = tmp409 >= tmp19
tmp411 = tl.where(tmp410, tmp392, tmp390)
tmp412 = tmp391 * tmp1
tmp413 = tmp411 + tmp412
tmp414 = tmp2 - tmp413
tmp415 = triton_helpers.maximum(tmp414, tmp19)
tmp416 = libdevice.pow(tmp415, tmp21)
tmp417 = tmp4 - tmp413
tmp418 = triton_helpers.maximum(tmp417, tmp19)
tmp419 = libdevice.pow(tmp418, tmp21)
tmp420 = tmp416 + tmp419
tmp421 = tmp7 - tmp413
tmp422 = triton_helpers.maximum(tmp421, tmp19)
tmp423 = libdevice.pow(tmp422, tmp21)
tmp424 = tmp420 + tmp423
tmp425 = tmp10 - tmp413
tmp426 = triton_helpers.maximum(tmp425, tmp19)
tmp427 = libdevice.pow(tmp426, tmp21)
tmp428 = tmp424 + tmp427
tmp429 = tmp428 - tmp12
tmp430 = tmp429 * tmp51
tmp431 = tmp430 >= tmp19
tmp432 = tl.where(tmp431, tmp413, tmp411)
tmp433 = tmp412 * tmp1
tmp434 = tmp432 + tmp433
tmp435 = tmp2 - tmp434
tmp436 = triton_helpers.maximum(tmp435, tmp19)
tmp437 = libdevice.pow(tmp436, tmp21)
tmp438 = tmp4 - tmp434
tmp439 = triton_helpers.maximum(tmp438, tmp19)
tmp440 = libdevice.pow(tmp439, tmp21)
tmp441 = tmp437 + tmp440
tmp442 = tmp7 - tmp434
tmp443 = triton_helpers.maximum(tmp442, tmp19)
tmp444 = libdevice.pow(tmp443, tmp21)
tmp445 = tmp441 + tmp444
tmp446 = tmp10 - tmp434
tmp447 = triton_helpers.maximum(tmp446, tmp19)
tmp448 = libdevice.pow(tmp447, tmp21)
tmp449 = tmp445 + tmp448
tmp450 = tmp449 - tmp12
tmp451 = tmp450 * tmp51
tmp452 = tmp451 >= tmp19
tmp453 = tl.where(tmp452, tmp434, tmp432)
tmp454 = tmp433 * tmp1
tmp455 = tmp453 + tmp454
tmp456 = tmp2 - tmp455
tmp457 = triton_helpers.maximum(tmp456, tmp19)
tmp458 = libdevice.pow(tmp457, tmp21)
tmp459 = tmp4 - tmp455
tmp460 = triton_helpers.maximum(tmp459, tmp19)
tmp461 = libdevice.pow(tmp460, tmp21)
tmp462 = tmp458 + tmp461
tmp463 = tmp7 - tmp455
tmp464 = triton_helpers.maximum(tmp463, tmp19)
tmp465 = libdevice.pow(tmp464, tmp21)
tmp466 = tmp462 + tmp465
tmp467 = tmp10 - tmp455
tmp468 = triton_helpers.maximum(tmp467, tmp19)
tmp469 = libdevice.pow(tmp468, tmp21)
tmp470 = tmp466 + tmp469
tmp471 = tmp470 - tmp12
tmp472 = tmp471 * tmp51
tmp473 = tmp472 >= tmp19
tmp474 = tl.where(tmp473, tmp455, tmp453)
tmp475 = tmp454 * tmp1
tmp476 = tmp474 + tmp475
tmp477 = tmp2 - tmp476
tmp478 = triton_helpers.maximum(tmp477, tmp19)
tmp479 = libdevice.pow(tmp478, tmp21)
tmp480 = tmp4 - tmp476
tmp481 = triton_helpers.maximum(tmp480, tmp19)
tmp482 = libdevice.pow(tmp481, tmp21)
tmp483 = tmp479 + tmp482
tmp484 = tmp7 - tmp476
tmp485 = triton_helpers.maximum(tmp484, tmp19)
tmp486 = libdevice.pow(tmp485, tmp21)
tmp487 = tmp483 + tmp486
tmp488 = tmp10 - tmp476
tmp489 = triton_helpers.maximum(tmp488, tmp19)
tmp490 = libdevice.pow(tmp489, tmp21)
tmp491 = tmp487 + tmp490
tmp492 = tmp491 - tmp12
tmp493 = tmp492 * tmp51
tmp494 = tmp493 >= tmp19
tmp495 = tl.where(tmp494, tmp476, tmp474)
tmp496 = tmp475 * tmp1
tmp497 = tmp495 + tmp496
tmp498 = tmp2 - tmp497
tmp499 = triton_helpers.maximum(tmp498, tmp19)
tmp500 = libdevice.pow(tmp499, tmp21)
tmp501 = tmp4 - tmp497
tmp502 = triton_helpers.maximum(tmp501, tmp19)
tmp503 = libdevice.pow(tmp502, tmp21)
tmp504 = tmp500 + tmp503
tmp505 = tmp7 - tmp497
tmp506 = triton_helpers.maximum(tmp505, tmp19)
tmp507 = libdevice.pow(tmp506, tmp21)
tmp508 = tmp504 + tmp507
tmp509 = tmp10 - tmp497
tmp510 = triton_helpers.maximum(tmp509, tmp19)
tmp511 = libdevice.pow(tmp510, tmp21)
tmp512 = tmp508 + tmp511
tmp513 = tmp512 - tmp12
tmp514 = tmp513 * tmp51
tmp515 = tmp514 >= tmp19
tmp516 = tl.where(tmp515, tmp497, tmp495)
tmp517 = tmp496 * tmp1
tmp518 = tmp516 + tmp517
tmp519 = tmp2 - tmp518
tmp520 = triton_helpers.maximum(tmp519, tmp19)
tmp521 = libdevice.pow(tmp520, tmp21)
tmp522 = tmp4 - tmp518
tmp523 = triton_helpers.maximum(tmp522, tmp19)
tmp524 = libdevice.pow(tmp523, tmp21)
tmp525 = tmp521 + tmp524
tmp526 = tmp7 - tmp518
tmp527 = triton_helpers.maximum(tmp526, tmp19)
tmp528 = libdevice.pow(tmp527, tmp21)
tmp529 = tmp525 + tmp528
tmp530 = tmp10 - tmp518
tmp531 = triton_helpers.maximum(tmp530, tmp19)
tmp532 = libdevice.pow(tmp531, tmp21)
tmp533 = tmp529 + tmp532
tmp534 = tmp533 - tmp12
tmp535 = tmp534 * tmp51
tmp536 = tmp535 >= tmp19
tmp537 = tl.where(tmp536, tmp518, tmp516)
tmp538 = tmp517 * tmp1
tmp539 = tmp537 + tmp538
tmp540 = tmp2 - tmp539
tmp541 = triton_helpers.maximum(tmp540, tmp19)
tmp542 = libdevice.pow(tmp541, tmp21)
tmp543 = tmp4 - tmp539
tmp544 = triton_helpers.maximum(tmp543, tmp19)
tmp545 = libdevice.pow(tmp544, tmp21)
tmp546 = tmp542 + tmp545
tmp547 = tmp7 - tmp539
tmp548 = triton_helpers.maximum(tmp547, tmp19)
tmp549 = libdevice.pow(tmp548, tmp21)
tmp550 = tmp546 + tmp549
tmp551 = tmp10 - tmp539
tmp552 = triton_helpers.maximum(tmp551, tmp19)
tmp553 = libdevice.pow(tmp552, tmp21)
tmp554 = tmp550 + tmp553
tmp555 = tmp554 - tmp12
tmp556 = tmp555 * tmp51
tmp557 = tmp556 >= tmp19
tmp558 = tl.where(tmp557, tmp539, tmp537)
tmp559 = tmp538 * tmp1
tmp560 = tmp558 + tmp559
tmp561 = tmp2 - tmp560
tmp562 = triton_helpers.maximum(tmp561, tmp19)
tmp563 = libdevice.pow(tmp562, tmp21)
tmp564 = tmp4 - tmp560
tmp565 = triton_helpers.maximum(tmp564, tmp19)
tmp566 = libdevice.pow(tmp565, tmp21)
tmp567 = tmp563 + tmp566
tmp568 = tmp7 - tmp560
tmp569 = triton_helpers.maximum(tmp568, tmp19)
tmp570 = libdevice.pow(tmp569, tmp21)
tmp571 = tmp567 + tmp570
tmp572 = tmp10 - tmp560
tmp573 = triton_helpers.maximum(tmp572, tmp19)
tmp574 = libdevice.pow(tmp573, tmp21)
tmp575 = tmp571 + tmp574
tl.store(out_ptr0 + x0, tmp49, xmask)
tl.store(out_ptr19 + x0, tmp308, xmask)
tl.store(in_out_ptr12 + x0, tmp558, xmask)
tl.store(out_ptr27 + x0, tmp575, xmask)
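# Commentary (added; inferred from the generated code, not from an original
# annotation): the kernels below continue the bisection that the compiler
# unrolled. With alpha = 1.5 the input is pre-scaled by 0.5 (X * (alpha - 1)),
# the chains of repeated `* 0.5` reconstruct the halved step dm / 2**k of a
# given iteration, `maximum(..., 0)` followed by `pow(..., 2.0)` evaluates the
# candidate probabilities, and `(sum - 1) * f_lo >= 0` decides via `tl.where`
# whether the midpoint becomes the new lower bound.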
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_1(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp7 = tmp6 - tmp4
tmp8 = tmp5 * tmp7
tmp9 = 0.0
tmp10 = tmp8 >= tmp9
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp11 + tmp23
tmp25 = tl.where(tmp10, tmp24, tmp11)
tmp26 = tmp23 * tmp1
tmp27 = tmp25 + tmp26
tmp28 = tmp2 - tmp27
tmp29 = triton_helpers.maximum(tmp28, tmp9)
tmp30 = 2.0
tmp31 = libdevice.pow(tmp29, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp14 = tl.load(in_ptr2 + x0, xmask)
tmp18 = tl.load(in_out_ptr0 + x0, xmask)
tmp19 = tl.load(in_ptr3 + x0, xmask)
tmp37 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp45 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp51 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = tmp9 - tmp7
tmp11 = tmp8 * tmp10
tmp12 = 0.0
tmp13 = tmp11 >= tmp12
tmp15 = tmp14 - tmp7
tmp16 = tmp15 * tmp10
tmp17 = tmp16 >= tmp12
tmp20 = 0.5
tmp21 = tmp19 * tmp20
tmp22 = tmp21 * tmp20
tmp23 = tmp22 * tmp20
tmp24 = tmp23 * tmp20
tmp25 = tmp24 * tmp20
tmp26 = tmp25 * tmp20
tmp27 = tmp26 * tmp20
tmp28 = tmp27 * tmp20
tmp29 = tmp28 * tmp20
tmp30 = tmp29 * tmp20
tmp31 = tmp30 * tmp20
tmp32 = tmp18 + tmp31
tmp33 = tl.where(tmp17, tmp32, tmp18)
tmp34 = tmp31 * tmp20
tmp35 = tmp33 + tmp34
tmp36 = tl.where(tmp13, tmp35, tmp33)
tmp38 = tmp37 * tmp20
tmp39 = tmp34 * tmp20
tmp40 = tmp36 + tmp39
tmp41 = tmp38 - tmp40
tmp42 = triton_helpers.maximum(tmp41, tmp12)
tmp43 = 2.0
tmp44 = libdevice.pow(tmp42, tmp43)
tmp46 = tmp45 * tmp20
tmp47 = tmp46 - tmp40
tmp48 = triton_helpers.maximum(tmp47, tmp12)
tmp49 = libdevice.pow(tmp48, tmp43)
tmp50 = tmp44 + tmp49
tmp52 = tmp51 * tmp20
tmp53 = tmp52 - tmp40
tmp54 = triton_helpers.maximum(tmp53, tmp12)
tmp55 = libdevice.pow(tmp54, tmp43)
tmp56 = tmp50 + tmp55
tmp58 = tmp57 * tmp20
tmp59 = tmp58 - tmp40
tmp60 = triton_helpers.maximum(tmp59, tmp12)
tmp61 = libdevice.pow(tmp60, tmp43)
tmp62 = tmp56 + tmp61
tl.store(in_out_ptr0 + x0, tmp36, xmask)
tl.store(out_ptr0 + x0, tmp62, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_where_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp7 = tmp6 - tmp4
tmp8 = tmp5 * tmp7
tmp9 = 0.0
tmp10 = tmp8 >= tmp9
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp11 + tmp25
tmp27 = tl.where(tmp10, tmp26, tmp11)
tmp28 = tmp25 * tmp1
tmp29 = tmp27 + tmp28
tmp30 = tmp2 - tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + x0, xmask)
tmp23 = tl.load(in_ptr2 + x0, xmask)
tmp27 = tl.load(in_out_ptr0 + x0, xmask)
tmp28 = tl.load(in_ptr3 + x0, xmask)
tmp48 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp55 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp61 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp67 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp24 = tmp23 - tmp17
tmp25 = tmp24 * tmp20
tmp26 = tmp25 >= tmp1
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = tmp30 * tmp29
tmp32 = tmp31 * tmp29
tmp33 = tmp32 * tmp29
tmp34 = tmp33 * tmp29
tmp35 = tmp34 * tmp29
tmp36 = tmp35 * tmp29
tmp37 = tmp36 * tmp29
tmp38 = tmp37 * tmp29
tmp39 = tmp38 * tmp29
tmp40 = tmp39 * tmp29
tmp41 = tmp40 * tmp29
tmp42 = tmp41 * tmp29
tmp43 = tmp27 + tmp42
tmp44 = tl.where(tmp26, tmp43, tmp27)
tmp45 = tmp42 * tmp29
tmp46 = tmp44 + tmp45
tmp47 = tl.where(tmp22, tmp46, tmp44)
tmp49 = tmp48 * tmp29
tmp50 = tmp45 * tmp29
tmp51 = tmp47 + tmp50
tmp52 = tmp49 - tmp51
tmp53 = triton_helpers.maximum(tmp52, tmp1)
tmp54 = libdevice.pow(tmp53, tmp3)
tmp56 = tmp55 * tmp29
tmp57 = tmp56 - tmp51
tmp58 = triton_helpers.maximum(tmp57, tmp1)
tmp59 = libdevice.pow(tmp58, tmp3)
tmp60 = tmp54 + tmp59
tmp62 = tmp61 * tmp29
tmp63 = tmp62 - tmp51
tmp64 = triton_helpers.maximum(tmp63, tmp1)
tmp65 = libdevice.pow(tmp64, tmp3)
tmp66 = tmp60 + tmp65
tmp68 = tmp67 * tmp29
tmp69 = tmp68 - tmp51
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = libdevice.pow(tmp70, tmp3)
tmp72 = tmp66 + tmp71
tl.store(in_out_ptr0 + x0, tmp47, xmask)
tl.store(out_ptr0 + x0, tmp72, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_where_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp7 = tmp6 - tmp4
tmp8 = tmp5 * tmp7
tmp9 = 0.0
tmp10 = tmp8 >= tmp9
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp11 + tmp27
tmp29 = tl.where(tmp10, tmp28, tmp11)
tmp30 = tmp27 * tmp1
tmp31 = tmp29 + tmp30
tmp32 = tmp2 - tmp31
tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6(in_out_ptr0,
in_out_ptr2, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + x0, xmask)
tmp23 = tl.load(in_ptr2 + x0, xmask)
tmp27 = tl.load(in_out_ptr0 + x0, xmask)
tmp28 = tl.load(in_ptr3 + x0, xmask)
tmp50 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp57 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp63 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp69 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp24 = tmp23 - tmp17
tmp25 = tmp24 * tmp20
tmp26 = tmp25 >= tmp1
tmp29 = 0.5
tmp30 = tmp28 * tmp29
tmp31 = tmp30 * tmp29
tmp32 = tmp31 * tmp29
tmp33 = tmp32 * tmp29
tmp34 = tmp33 * tmp29
tmp35 = tmp34 * tmp29
tmp36 = tmp35 * tmp29
tmp37 = tmp36 * tmp29
tmp38 = tmp37 * tmp29
tmp39 = tmp38 * tmp29
tmp40 = tmp39 * tmp29
tmp41 = tmp40 * tmp29
tmp42 = tmp41 * tmp29
tmp43 = tmp42 * tmp29
tmp44 = tmp43 * tmp29
tmp45 = tmp27 + tmp44
tmp46 = tl.where(tmp26, tmp45, tmp27)
tmp47 = tmp44 * tmp29
tmp48 = tmp46 + tmp47
tmp49 = tl.where(tmp22, tmp48, tmp46)
tmp51 = tmp50 * tmp29
tmp52 = tmp47 * tmp29
tmp53 = tmp49 + tmp52
tmp54 = tmp51 - tmp53
tmp55 = triton_helpers.maximum(tmp54, tmp1)
tmp56 = libdevice.pow(tmp55, tmp3)
tmp58 = tmp57 * tmp29
tmp59 = tmp58 - tmp53
tmp60 = triton_helpers.maximum(tmp59, tmp1)
tmp61 = libdevice.pow(tmp60, tmp3)
tmp62 = tmp56 + tmp61
tmp64 = tmp63 * tmp29
tmp65 = tmp64 - tmp53
tmp66 = triton_helpers.maximum(tmp65, tmp1)
tmp67 = libdevice.pow(tmp66, tmp3)
tmp68 = tmp62 + tmp67
tmp70 = tmp69 * tmp29
tmp71 = tmp70 - tmp53
tmp72 = triton_helpers.maximum(tmp71, tmp1)
tmp73 = libdevice.pow(tmp72, tmp3)
tmp74 = tmp68 + tmp73
tmp75 = tmp74 - tmp17
tmp76 = tmp75 * tmp20
tmp77 = tmp76 >= tmp1
tmp78 = tl.where(tmp77, tmp53, tmp49)
tmp79 = tmp52 * tmp29
tmp80 = tmp78 + tmp79
tmp81 = tmp51 - tmp80
tmp82 = triton_helpers.maximum(tmp81, tmp1)
tmp83 = libdevice.pow(tmp82, tmp3)
tmp84 = tmp58 - tmp80
tmp85 = triton_helpers.maximum(tmp84, tmp1)
tmp86 = libdevice.pow(tmp85, tmp3)
tmp87 = tmp83 + tmp86
tmp88 = tmp64 - tmp80
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = libdevice.pow(tmp89, tmp3)
tmp91 = tmp87 + tmp90
tmp92 = tmp70 - tmp80
tmp93 = triton_helpers.maximum(tmp92, tmp1)
tmp94 = libdevice.pow(tmp93, tmp3)
tmp95 = tmp91 + tmp94
tmp96 = tmp95 - tmp17
tmp97 = tmp96 * tmp20
tmp98 = tmp97 >= tmp1
tmp99 = tl.where(tmp98, tmp80, tmp78)
tmp100 = tmp79 * tmp29
tmp101 = tmp99 + tmp100
tmp102 = tmp51 - tmp101
tmp103 = triton_helpers.maximum(tmp102, tmp1)
tmp104 = libdevice.pow(tmp103, tmp3)
tmp105 = tmp58 - tmp101
tmp106 = triton_helpers.maximum(tmp105, tmp1)
tmp107 = libdevice.pow(tmp106, tmp3)
tmp108 = tmp104 + tmp107
tmp109 = tmp64 - tmp101
tmp110 = triton_helpers.maximum(tmp109, tmp1)
tmp111 = libdevice.pow(tmp110, tmp3)
tmp112 = tmp108 + tmp111
tmp113 = tmp70 - tmp101
tmp114 = triton_helpers.maximum(tmp113, tmp1)
tmp115 = libdevice.pow(tmp114, tmp3)
tmp116 = tmp112 + tmp115
tmp117 = tmp116 - tmp17
tmp118 = tmp117 * tmp20
tmp119 = tmp118 >= tmp1
tmp120 = tl.where(tmp119, tmp101, tmp99)
tl.store(in_out_ptr2 + x0, tmp120, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp3 + tmp24
tmp26 = tmp2 - tmp25
tmp27 = 0.0
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = 2.0
tmp30 = libdevice.pow(tmp28, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_where_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp14 = tl.load(in_out_ptr0 + x0, xmask)
tmp15 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = tmp9 - tmp7
tmp11 = tmp8 * tmp10
tmp12 = 0.0
tmp13 = tmp11 >= tmp12
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp19 * tmp16
tmp21 = tmp20 * tmp16
tmp22 = tmp21 * tmp16
tmp23 = tmp22 * tmp16
tmp24 = tmp23 * tmp16
tmp25 = tmp24 * tmp16
tmp26 = tmp25 * tmp16
tmp27 = tmp26 * tmp16
tmp28 = tmp27 * tmp16
tmp29 = tmp28 * tmp16
tmp30 = tmp29 * tmp16
tmp31 = tmp30 * tmp16
tmp32 = tmp31 * tmp16
tmp33 = tmp32 * tmp16
tmp34 = tmp33 * tmp16
tmp35 = tmp34 * tmp16
tmp36 = tmp35 * tmp16
tmp37 = tmp14 + tmp36
tmp38 = tl.where(tmp13, tmp37, tmp14)
tl.store(in_out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_9(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp3 + tmp25
tmp27 = tmp2 - tmp26
tmp28 = 0.0
tmp29 = triton_helpers.maximum(tmp27, tmp28)
tmp30 = 2.0
tmp31 = libdevice.pow(tmp29, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_where_10(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + x0, xmask)
tmp14 = tl.load(in_out_ptr0 + x0, xmask)
tmp15 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 1.0
tmp8 = tmp6 - tmp7
tmp10 = tmp9 - tmp7
tmp11 = tmp8 * tmp10
tmp12 = 0.0
tmp13 = tmp11 >= tmp12
tmp16 = 0.5
tmp17 = tmp15 * tmp16
tmp18 = tmp17 * tmp16
tmp19 = tmp18 * tmp16
tmp20 = tmp19 * tmp16
tmp21 = tmp20 * tmp16
tmp22 = tmp21 * tmp16
tmp23 = tmp22 * tmp16
tmp24 = tmp23 * tmp16
tmp25 = tmp24 * tmp16
tmp26 = tmp25 * tmp16
tmp27 = tmp26 * tmp16
tmp28 = tmp27 * tmp16
tmp29 = tmp28 * tmp16
tmp30 = tmp29 * tmp16
tmp31 = tmp30 * tmp16
tmp32 = tmp31 * tmp16
tmp33 = tmp32 * tmp16
tmp34 = tmp33 * tmp16
tmp35 = tmp34 * tmp16
tmp36 = tmp35 * tmp16
tmp37 = tmp36 * tmp16
tmp38 = tmp14 + tmp37
tmp39 = tl.where(tmp13, tmp38, tmp14)
tl.store(in_out_ptr0 + x0, tmp39, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_11(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp3 + tmp26
tmp28 = tmp2 - tmp27
tmp29 = 0.0
tmp30 = triton_helpers.maximum(tmp28, tmp29)
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_where_12(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + x0, xmask)
tmp19 = tl.load(in_out_ptr0 + x0, xmask)
tmp20 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 2.0
tmp2 = libdevice.pow(tmp0, tmp1)
tmp4 = libdevice.pow(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = libdevice.pow(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = libdevice.pow(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp15 = tmp14 - tmp12
tmp16 = tmp13 * tmp15
tmp17 = 0.0
tmp18 = tmp16 >= tmp17
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp24 * tmp21
tmp26 = tmp25 * tmp21
tmp27 = tmp26 * tmp21
tmp28 = tmp27 * tmp21
tmp29 = tmp28 * tmp21
tmp30 = tmp29 * tmp21
tmp31 = tmp30 * tmp21
tmp32 = tmp31 * tmp21
tmp33 = tmp32 * tmp21
tmp34 = tmp33 * tmp21
tmp35 = tmp34 * tmp21
tmp36 = tmp35 * tmp21
tmp37 = tmp36 * tmp21
tmp38 = tmp37 * tmp21
tmp39 = tmp38 * tmp21
tmp40 = tmp39 * tmp21
tmp41 = tmp40 * tmp21
tmp42 = tmp41 * tmp21
tmp43 = tmp42 * tmp21
tmp44 = tmp19 + tmp43
tmp45 = tl.where(tmp18, tmp44, tmp19)
tl.store(in_out_ptr0 + x0, tmp45, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_sub_13(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp3 + tmp27
tmp29 = tmp2 - tmp28
tmp30 = 0.0
tmp31 = triton_helpers.maximum(tmp29, tmp30)
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_where_14(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr1 + x0, xmask)
tmp19 = tl.load(in_out_ptr0 + x0, xmask)
tmp20 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 2.0
tmp2 = libdevice.pow(tmp0, tmp1)
tmp4 = libdevice.pow(tmp3, tmp1)
tmp5 = tmp2 + tmp4
tmp7 = libdevice.pow(tmp6, tmp1)
tmp8 = tmp5 + tmp7
tmp10 = libdevice.pow(tmp9, tmp1)
tmp11 = tmp8 + tmp10
tmp12 = 1.0
tmp13 = tmp11 - tmp12
tmp15 = tmp14 - tmp12
tmp16 = tmp13 * tmp15
tmp17 = 0.0
tmp18 = tmp16 >= tmp17
tmp21 = 0.5
tmp22 = tmp20 * tmp21
tmp23 = tmp22 * tmp21
tmp24 = tmp23 * tmp21
tmp25 = tmp24 * tmp21
tmp26 = tmp25 * tmp21
tmp27 = tmp26 * tmp21
tmp28 = tmp27 * tmp21
tmp29 = tmp28 * tmp21
tmp30 = tmp29 * tmp21
tmp31 = tmp30 * tmp21
tmp32 = tmp31 * tmp21
tmp33 = tmp32 * tmp21
tmp34 = tmp33 * tmp21
tmp35 = tmp34 * tmp21
tmp36 = tmp35 * tmp21
tmp37 = tmp36 * tmp21
tmp38 = tmp37 * tmp21
tmp39 = tmp38 * tmp21
tmp40 = tmp39 * tmp21
tmp41 = tmp40 * tmp21
tmp42 = tmp41 * tmp21
tmp43 = tmp42 * tmp21
tmp44 = tmp43 * tmp21
tmp45 = tmp19 + tmp44
tmp46 = tl.where(tmp18, tmp45, tmp19)
tl.store(in_out_ptr0 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_15(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp3 + tmp28
tmp30 = tmp2 - tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
@triton.jit
def triton_poi_fused_add_div_where_16(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + x0, xmask)
tmp23 = tl.load(in_out_ptr0 + x0, xmask)
tmp24 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tmp30 = tmp29 * tmp25
tmp31 = tmp30 * tmp25
tmp32 = tmp31 * tmp25
tmp33 = tmp32 * tmp25
tmp34 = tmp33 * tmp25
tmp35 = tmp34 * tmp25
tmp36 = tmp35 * tmp25
tmp37 = tmp36 * tmp25
tmp38 = tmp37 * tmp25
tmp39 = tmp38 * tmp25
tmp40 = tmp39 * tmp25
tmp41 = tmp40 * tmp25
tmp42 = tmp41 * tmp25
tmp43 = tmp42 * tmp25
tmp44 = tmp43 * tmp25
tmp45 = tmp44 * tmp25
tmp46 = tmp45 * tmp25
tmp47 = tmp46 * tmp25
tmp48 = tmp47 * tmp25
tmp49 = tmp48 * tmp25
tmp50 = tmp23 + tmp49
tmp51 = tl.where(tmp22, tmp50, tmp23)
tl.store(in_out_ptr0 + x0, tmp51, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp28 * tmp1
tmp30 = tmp3 + tmp29
tmp31 = tmp2 - tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused_add_div_where_18(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + x0, xmask)
tmp23 = tl.load(in_out_ptr0 + x0, xmask)
tmp24 = tl.load(in_ptr2 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tmp30 = tmp29 * tmp25
tmp31 = tmp30 * tmp25
tmp32 = tmp31 * tmp25
tmp33 = tmp32 * tmp25
tmp34 = tmp33 * tmp25
tmp35 = tmp34 * tmp25
tmp36 = tmp35 * tmp25
tmp37 = tmp36 * tmp25
tmp38 = tmp37 * tmp25
tmp39 = tmp38 * tmp25
tmp40 = tmp39 * tmp25
tmp41 = tmp40 * tmp25
tmp42 = tmp41 * tmp25
tmp43 = tmp42 * tmp25
tmp44 = tmp43 * tmp25
tmp45 = tmp44 * tmp25
tmp46 = tmp45 * tmp25
tmp47 = tmp46 * tmp25
tmp48 = tmp47 * tmp25
tmp49 = tmp48 * tmp25
tmp50 = tmp49 * tmp25
tmp51 = tmp23 + tmp50
tmp52 = tl.where(tmp22, tmp51, tmp23)
tl.store(in_out_ptr0 + x0, tmp52, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_sub_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp5 = tmp4 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = tmp6 * tmp1
tmp8 = tmp7 * tmp1
tmp9 = tmp8 * tmp1
tmp10 = tmp9 * tmp1
tmp11 = tmp10 * tmp1
tmp12 = tmp11 * tmp1
tmp13 = tmp12 * tmp1
tmp14 = tmp13 * tmp1
tmp15 = tmp14 * tmp1
tmp16 = tmp15 * tmp1
tmp17 = tmp16 * tmp1
tmp18 = tmp17 * tmp1
tmp19 = tmp18 * tmp1
tmp20 = tmp19 * tmp1
tmp21 = tmp20 * tmp1
tmp22 = tmp21 * tmp1
tmp23 = tmp22 * tmp1
tmp24 = tmp23 * tmp1
tmp25 = tmp24 * tmp1
tmp26 = tmp25 * tmp1
tmp27 = tmp26 * tmp1
tmp28 = tmp27 * tmp1
tmp29 = tmp28 * tmp1
tmp30 = tmp29 * tmp1
tmp31 = tmp3 + tmp30
tmp32 = tmp2 - tmp31
tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20(in_out_ptr0,
in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr7, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + x0, xmask)
tmp23 = tl.load(in_out_ptr0 + x0, xmask)
tmp24 = tl.load(in_ptr2 + x0, xmask)
tmp56 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp61 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp67 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp73 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 2.0
tmp4 = libdevice.pow(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = libdevice.pow(tmp6, tmp3)
tmp8 = tmp4 + tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = libdevice.pow(tmp10, tmp3)
tmp12 = tmp8 + tmp11
tmp14 = triton_helpers.maximum(tmp13, tmp1)
tmp15 = libdevice.pow(tmp14, tmp3)
tmp16 = tmp12 + tmp15
tmp17 = 1.0
tmp18 = tmp16 - tmp17
tmp20 = tmp19 - tmp17
tmp21 = tmp18 * tmp20
tmp22 = tmp21 >= tmp1
tmp25 = 0.5
tmp26 = tmp24 * tmp25
tmp27 = tmp26 * tmp25
tmp28 = tmp27 * tmp25
tmp29 = tmp28 * tmp25
tmp30 = tmp29 * tmp25
tmp31 = tmp30 * tmp25
tmp32 = tmp31 * tmp25
tmp33 = tmp32 * tmp25
tmp34 = tmp33 * tmp25
tmp35 = tmp34 * tmp25
tmp36 = tmp35 * tmp25
tmp37 = tmp36 * tmp25
tmp38 = tmp37 * tmp25
tmp39 = tmp38 * tmp25
tmp40 = tmp39 * tmp25
tmp41 = tmp40 * tmp25
tmp42 = tmp41 * tmp25
tmp43 = tmp42 * tmp25
tmp44 = tmp43 * tmp25
tmp45 = tmp44 * tmp25
tmp46 = tmp45 * tmp25
tmp47 = tmp46 * tmp25
tmp48 = tmp47 * tmp25
tmp49 = tmp48 * tmp25
tmp50 = tmp49 * tmp25
tmp51 = tmp50 * tmp25
tmp52 = tmp23 + tmp51
tmp53 = tl.where(tmp22, tmp52, tmp23)
tmp54 = tmp51 * tmp25
tmp55 = tmp53 + tmp54
tmp57 = tmp56 * tmp25
tmp58 = tmp57 - tmp55
tmp59 = triton_helpers.maximum(tmp58, tmp1)
tmp60 = libdevice.pow(tmp59, tmp3)
tmp62 = tmp61 * tmp25
tmp63 = tmp62 - tmp55
tmp64 = triton_helpers.maximum(tmp63, tmp1)
tmp65 = libdevice.pow(tmp64, tmp3)
tmp66 = tmp60 + tmp65
tmp68 = tmp67 * tmp25
tmp69 = tmp68 - tmp55
tmp70 = triton_helpers.maximum(tmp69, tmp1)
tmp71 = libdevice.pow(tmp70, tmp3)
tmp72 = tmp66 + tmp71
tmp74 = tmp73 * tmp25
tmp75 = tmp74 - tmp55
tmp76 = triton_helpers.maximum(tmp75, tmp1)
tmp77 = libdevice.pow(tmp76, tmp3)
tmp78 = tmp72 + tmp77
tmp79 = tmp78 - tmp17
tmp80 = tmp79 * tmp20
tmp81 = tmp80 >= tmp1
tmp82 = tl.where(tmp81, tmp55, tmp53)
tmp83 = tmp54 * tmp25
tmp84 = tmp82 + tmp83
tmp85 = tmp57 - tmp84
tmp86 = triton_helpers.maximum(tmp85, tmp1)
tmp87 = libdevice.pow(tmp86, tmp3)
tmp88 = tmp62 - tmp84
tmp89 = triton_helpers.maximum(tmp88, tmp1)
tmp90 = libdevice.pow(tmp89, tmp3)
tmp91 = tmp87 + tmp90
tmp92 = tmp68 - tmp84
tmp93 = triton_helpers.maximum(tmp92, tmp1)
tmp94 = libdevice.pow(tmp93, tmp3)
tmp95 = tmp91 + tmp94
tmp96 = tmp74 - tmp84
tmp97 = triton_helpers.maximum(tmp96, tmp1)
tmp98 = libdevice.pow(tmp97, tmp3)
tmp99 = tmp95 + tmp98
tmp100 = tmp99 - tmp17
tmp101 = tmp100 * tmp20
tmp102 = tmp101 >= tmp1
tmp103 = tl.where(tmp102, tmp84, tmp82)
tmp104 = tmp83 * tmp25
tmp105 = tmp103 + tmp104
tmp106 = tmp57 - tmp105
tmp107 = triton_helpers.maximum(tmp106, tmp1)
tmp108 = libdevice.pow(tmp107, tmp3)
tmp109 = tmp62 - tmp105
tmp110 = triton_helpers.maximum(tmp109, tmp1)
tmp111 = libdevice.pow(tmp110, tmp3)
tmp112 = tmp108 + tmp111
tmp113 = tmp68 - tmp105
tmp114 = triton_helpers.maximum(tmp113, tmp1)
tmp115 = libdevice.pow(tmp114, tmp3)
tmp116 = tmp112 + tmp115
tmp117 = tmp74 - tmp105
tmp118 = triton_helpers.maximum(tmp117, tmp1)
tmp119 = libdevice.pow(tmp118, tmp3)
tmp120 = tmp116 + tmp119
tmp121 = tmp120 - tmp17
tmp122 = tmp121 * tmp20
tmp123 = tmp122 >= tmp1
tmp124 = tl.where(tmp123, tmp105, tmp103)
tmp125 = tmp104 * tmp25
tmp126 = tmp124 + tmp125
tmp127 = tmp57 - tmp126
tmp128 = triton_helpers.maximum(tmp127, tmp1)
tmp129 = libdevice.pow(tmp128, tmp3)
tmp130 = tmp62 - tmp126
tmp131 = triton_helpers.maximum(tmp130, tmp1)
tmp132 = libdevice.pow(tmp131, tmp3)
tmp133 = tmp129 + tmp132
tmp134 = tmp68 - tmp126
tmp135 = triton_helpers.maximum(tmp134, tmp1)
tmp136 = libdevice.pow(tmp135, tmp3)
tmp137 = tmp133 + tmp136
tmp138 = tmp74 - tmp126
tmp139 = triton_helpers.maximum(tmp138, tmp1)
tmp140 = libdevice.pow(tmp139, tmp3)
tmp141 = tmp137 + tmp140
tmp142 = tmp141 - tmp17
tmp143 = tmp142 * tmp20
tmp144 = tmp143 >= tmp1
tmp145 = tl.where(tmp144, tmp126, tmp124)
tmp146 = tmp125 * tmp25
tmp147 = tmp145 + tmp146
tmp148 = tmp57 - tmp147
tmp149 = triton_helpers.maximum(tmp148, tmp1)
tmp150 = libdevice.pow(tmp149, tmp3)
tmp151 = tmp62 - tmp147
tmp152 = triton_helpers.maximum(tmp151, tmp1)
tmp153 = libdevice.pow(tmp152, tmp3)
tmp154 = tmp150 + tmp153
tmp155 = tmp68 - tmp147
tmp156 = triton_helpers.maximum(tmp155, tmp1)
tmp157 = libdevice.pow(tmp156, tmp3)
tmp158 = tmp154 + tmp157
tmp159 = tmp74 - tmp147
tmp160 = triton_helpers.maximum(tmp159, tmp1)
tmp161 = libdevice.pow(tmp160, tmp3)
tmp162 = tmp158 + tmp161
tmp163 = tmp162 - tmp17
tmp164 = tmp163 * tmp20
tmp165 = tmp164 >= tmp1
tmp166 = tl.where(tmp165, tmp147, tmp145)
tmp167 = tmp146 * tmp25
tmp168 = tmp166 + tmp167
tmp169 = tmp57 - tmp168
tmp170 = triton_helpers.maximum(tmp169, tmp1)
tmp171 = libdevice.pow(tmp170, tmp3)
tmp172 = tmp62 - tmp168
tmp173 = triton_helpers.maximum(tmp172, tmp1)
tmp174 = libdevice.pow(tmp173, tmp3)
tmp175 = tmp171 + tmp174
tmp176 = tmp68 - tmp168
tmp177 = triton_helpers.maximum(tmp176, tmp1)
tmp178 = libdevice.pow(tmp177, tmp3)
tmp179 = tmp175 + tmp178
tmp180 = tmp74 - tmp168
tmp181 = triton_helpers.maximum(tmp180, tmp1)
tmp182 = libdevice.pow(tmp181, tmp3)
tmp183 = tmp179 + tmp182
tmp184 = tmp183 - tmp17
tmp185 = tmp184 * tmp20
tmp186 = tmp185 >= tmp1
tmp187 = tl.where(tmp186, tmp168, tmp166)
tmp188 = tmp167 * tmp25
tmp189 = tmp187 + tmp188
tmp190 = tmp57 - tmp189
tmp191 = triton_helpers.maximum(tmp190, tmp1)
tmp192 = libdevice.pow(tmp191, tmp3)
tmp193 = tmp62 - tmp189
tmp194 = triton_helpers.maximum(tmp193, tmp1)
tmp195 = libdevice.pow(tmp194, tmp3)
tmp196 = tmp192 + tmp195
tmp197 = tmp68 - tmp189
tmp198 = triton_helpers.maximum(tmp197, tmp1)
tmp199 = libdevice.pow(tmp198, tmp3)
tmp200 = tmp196 + tmp199
tmp201 = tmp74 - tmp189
tmp202 = triton_helpers.maximum(tmp201, tmp1)
tmp203 = libdevice.pow(tmp202, tmp3)
tmp204 = tmp200 + tmp203
tmp205 = tmp204 - tmp17
tmp206 = tmp205 * tmp20
tmp207 = tmp206 >= tmp1
tmp208 = tl.where(tmp207, tmp189, tmp187)
tmp209 = tmp188 * tmp25
tmp210 = tmp208 + tmp209
tmp211 = tmp57 - tmp210
tmp212 = triton_helpers.maximum(tmp211, tmp1)
tmp213 = libdevice.pow(tmp212, tmp3)
tmp214 = tmp62 - tmp210
tmp215 = triton_helpers.maximum(tmp214, tmp1)
tmp216 = libdevice.pow(tmp215, tmp3)
tmp217 = tmp213 + tmp216
tmp218 = tmp68 - tmp210
tmp219 = triton_helpers.maximum(tmp218, tmp1)
tmp220 = libdevice.pow(tmp219, tmp3)
tmp221 = tmp217 + tmp220
tmp222 = tmp74 - tmp210
tmp223 = triton_helpers.maximum(tmp222, tmp1)
tmp224 = libdevice.pow(tmp223, tmp3)
tmp225 = tmp221 + tmp224
tmp226 = tmp225 - tmp17
tmp227 = tmp226 * tmp20
tmp228 = tmp227 >= tmp1
tmp229 = tl.where(tmp228, tmp210, tmp208)
tmp230 = tmp209 * tmp25
tmp231 = tmp229 + tmp230
tmp232 = tmp57 - tmp231
tmp233 = triton_helpers.maximum(tmp232, tmp1)
tmp234 = libdevice.pow(tmp233, tmp3)
tmp235 = tmp62 - tmp231
tmp236 = triton_helpers.maximum(tmp235, tmp1)
tmp237 = libdevice.pow(tmp236, tmp3)
tmp238 = tmp234 + tmp237
tmp239 = tmp68 - tmp231
tmp240 = triton_helpers.maximum(tmp239, tmp1)
tmp241 = libdevice.pow(tmp240, tmp3)
tmp242 = tmp238 + tmp241
tmp243 = tmp74 - tmp231
tmp244 = triton_helpers.maximum(tmp243, tmp1)
tmp245 = libdevice.pow(tmp244, tmp3)
tmp246 = tmp242 + tmp245
tl.store(in_out_ptr4 + x0, tmp231, xmask)
tl.store(out_ptr7 + x0, tmp246, xmask)
@triton.jit
def triton_poi_fused_add_clamp_div_mul_pow_sub_where_21(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp2 - tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 2.0
tmp8 = libdevice.pow(tmp6, tmp7)
tmp10 = tmp8 / tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
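# Commentary (added; an inference from reading the kernels and `call` below):
# this last kernel applies the `ensure_sum_one` normalization, dividing each
# max(x * 0.5 - tau, 0) ** 2 term by its row sum. The wrapper below ping-pongs
# a small set of buffers (buf54/buf57/buf60..., buf52/buf55/buf58...) between
# the 256-element residual kernels and the 64-element tau-update kernels, so
# the unrolled bisection loop (n_iter=50 by default) reuses memory instead of
# allocating fresh buffers per step.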
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf35 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf51 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf52 = reinterpret_tensor(buf51, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf51
buf53 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_clamp_div_max_mul_pow_sub_sum_where_0[grid(64)](
buf52, arg0_1, buf1, buf35, buf53, 64, XBLOCK=64, num_warps=1,
num_stages=1)
buf54 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_mul_pow_sub_where_1[grid(256)](arg0_1,
buf53, buf1, buf52, buf35, buf54, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf55 = buf52
del buf52
buf56 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_2[grid(64)](buf55,
buf54, buf1, buf53, buf35, arg0_1, buf56, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf57 = buf54
del buf54
triton_poi_fused_add_div_mul_sub_where_3[grid(256)](arg0_1, buf56,
buf1, buf55, buf35, buf57, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf58 = buf55
del buf55
buf59 = buf53
del buf53
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_4[grid(64)](buf58,
buf57, buf1, buf56, buf35, arg0_1, buf59, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf60 = buf57
del buf57
triton_poi_fused_add_div_mul_sub_where_5[grid(256)](arg0_1, buf59,
buf1, buf58, buf35, buf60, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf61 = buf58
del buf58
buf66 = buf56
del buf56
buf67 = reinterpret_tensor(buf66, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf66
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_6[grid(64)](buf61,
buf67, buf60, buf1, buf59, buf35, arg0_1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf68 = buf60
del buf60
triton_poi_fused_add_clamp_div_mul_pow_sub_7[grid(256)](arg0_1,
buf67, buf35, buf68, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf69 = buf67
del buf67
triton_poi_fused_add_div_where_8[grid(64)](buf69, buf68, buf1,
buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf70 = buf68
del buf68
triton_poi_fused_add_clamp_div_mul_pow_sub_9[grid(256)](arg0_1,
buf69, buf35, buf70, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf71 = buf69
del buf69
triton_poi_fused_add_div_where_10[grid(64)](buf71, buf70, buf1,
buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf72 = buf70
del buf70
triton_poi_fused_add_clamp_div_mul_sub_11[grid(256)](arg0_1, buf71,
buf35, buf72, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf73 = buf71
del buf71
triton_poi_fused_add_div_where_12[grid(64)](buf73, buf72, buf1,
buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf74 = buf72
del buf72
triton_poi_fused_add_clamp_div_mul_sub_13[grid(256)](arg0_1, buf73,
buf35, buf74, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf75 = buf73
del buf73
triton_poi_fused_add_div_where_14[grid(64)](buf75, buf74, buf1,
buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf76 = buf74
del buf74
triton_poi_fused_add_div_mul_sub_15[grid(256)](arg0_1, buf75, buf35,
buf76, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf77 = buf75
del buf75
triton_poi_fused_add_div_where_16[grid(64)](buf77, buf76, buf1,
buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf78 = buf76
del buf76
triton_poi_fused_add_div_mul_sub_17[grid(256)](arg0_1, buf77, buf35,
buf78, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf79 = buf77
del buf77
triton_poi_fused_add_div_where_18[grid(64)](buf79, buf78, buf1,
buf35, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf80 = buf78
del buf78
triton_poi_fused_add_div_mul_sub_19[grid(256)](arg0_1, buf79, buf35,
buf80, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf81 = buf79
del buf79
buf95 = reinterpret_tensor(buf61, (4, 4, 4), (16, 4, 1), 0)
del buf61
buf96 = reinterpret_tensor(buf95, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf95
buf97 = buf59
del buf59
triton_poi_fused_add_clamp_div_mul_pow_sub_sum_where_20[grid(64)](buf81
, buf96, buf80, buf1, buf35, arg0_1, buf97, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf1
del buf35
del buf81
buf98 = buf80
del buf80
triton_poi_fused_add_clamp_div_mul_pow_sub_where_21[grid(256)](arg0_1,
buf96, buf97, buf98, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del buf96
del buf97
return buf98,
def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
"""alpha-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1.
where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
using a bisection (root finding, binary search) algorithm.
This function is differentiable with respect to both X and alpha.
Parameters
----------
X : torch.Tensor
The input tensor.
alpha : float or torch.Tensor
Tensor of alpha parameters (> 1) to use. If a scalar
or Python float, the same value is used for all rows; otherwise,
it must have shape (or be expandable to)
alpha.shape[j] == (X.shape[j] if j != dim else 1)
A value of alpha=2 corresponds to sparsemax, and alpha=1 corresponds to
softmax (but computing it this way is likely unstable).
dim : int
The dimension along which to apply alpha-entmax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
ensure_sum_one : bool
Whether to divide the result by its sum. If False, the result might
sum to a value close to, but not exactly, 1, which can cause downstream problems.
Returns
-------
P : torch.Tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 for every slice along dim.
"""
return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)
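# A minimal eager-mode sketch (added for illustration; `_entmax_bisect_ref` is
# a hypothetical helper, not part of the original module) of the bisection the
# fused kernels above implement, assuming a fixed scalar alpha > 1:
def _entmax_bisect_ref(X, alpha=1.5, dim=-1, n_iter=50):
    d = X.shape[dim]
    X = X * (alpha - 1)
    max_val, _ = X.max(dim=dim, keepdim=True)
    # g(t) = t ** (alpha - 1), so g(1) = 1 and g(1 / d) bracket the threshold
    tau_lo = max_val - 1.0
    tau_hi = max_val - (1.0 / d) ** (alpha - 1)
    f_lo = (torch.clamp(X - tau_lo, min=0) ** (1 / (alpha - 1))).sum(dim) - 1
    dm = tau_hi - tau_lo
    for _ in range(n_iter):
        dm /= 2
        tau_m = tau_lo + dm
        p_m = torch.clamp(X - tau_m, min=0) ** (1 / (alpha - 1))
        f_m = p_m.sum(dim) - 1
        # keep the midpoint as the new lower bound when the sign agrees
        tau_lo = torch.where((f_m * f_lo >= 0).unsqueeze(dim), tau_m, tau_lo)
    return p_m / p_m.sum(dim=dim, keepdim=True)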
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
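    # Commentary (added): _gp is g(t) = t ** (alpha - 1); _gp_inv is its
    # inverse; so _p(Z, alpha) computes max(Z, 0) ** (1 / (alpha - 1)), the
    # candidate probability vector for a given threshold. For alpha = 1.5 the
    # exponent is 2, matching the squared-ReLU `pow(..., 2.0)` calls in the
    # kernels above.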
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
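def _entmax_bisect_gradcheck_sketch():
    # Hedged verification sketch (not part of the original repo code):
    # torch.autograd.gradcheck compares the custom backward above against
    # finite differences. Double precision and a fixed float alpha are used;
    # the check can be flaky exactly at support-change boundaries.
    X = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
    return torch.autograd.gradcheck(
        lambda x: EntmaxBisectFunction.apply(x, 1.5, -1, 50, True),
        (X,), eps=1e-6, atol=1e-4)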
class EntmaxBisectNew(nn.Module):
def __init__(self, alpha=1.5, dim=-1, n_iter=50):
"""alpha-entmax: normalizing sparse map (a la softmax) via bisection.
Solves the optimization problem:
max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1.
where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
using a bisection (root finding, binary search) algorithm.
Parameters
----------
alpha : float or torch.Tensor
            Tensor of alpha parameters (> 1) to use. If a scalar or Python
            float, the same value is used for all rows; otherwise, it must
            have shape (or be expandable to)
alpha.shape[j] == (X.shape[j] if j != dim else 1)
A value of alpha=2 corresponds to sparsemax; alpha=1 corresponds
to softmax (but computing it this way is likely unstable).
dim : int
The dimension along which to apply alpha-entmax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
"""
self.dim = dim
self.n_iter = n_iter
self.alpha = alpha
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| roholazandie/entmax | EntmaxBisect | false | 7,629 | [
"MIT"
] | 1 | 657374e6a792ec6840b6f78bc759cc1f51570aad | https://github.com/roholazandie/entmax/tree/657374e6a792ec6840b6f78bc759cc1f51570aad | from torch.autograd import Function
import torch
import torch.nn as nn
def entmax_bisect(X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True):
"""alpha-entmax: normalizing sparse transform (a la softmax).
Solves the optimization problem:
max_p <x, p> - H_a(p) s.t. p >= 0, sum(p) == 1.
where H_a(p) is the Tsallis alpha-entropy with custom alpha >= 1,
using a bisection (root finding, binary search) algorithm.
This function is differentiable with respect to both X and alpha.
Parameters
----------
X : torch.Tensor
The input tensor.
alpha : float or torch.Tensor
        Tensor of alpha parameters (> 1) to use. If a scalar or Python
        float, the same value is used for all rows; otherwise, it must
        have shape (or be expandable to)
alpha.shape[j] == (X.shape[j] if j != dim else 1)
A value of alpha=2 corresponds to sparsemax, and alpha=1 corresponds to
softmax (but computing it this way is likely unstable).
dim : int
The dimension along which to apply alpha-entmax.
n_iter : int
Number of bisection iterations. For float32, 24 iterations should
suffice for machine precision.
    ensure_sum_one : bool
        Whether to divide the result by its sum. If False, the result might
        sum to a value close to, but not exactly, 1, which might cause
        downstream problems.
Returns
-------
P : torch tensor, same shape as X
The projection result, such that P.sum(dim=dim) == 1 elementwise.
"""
return EntmaxBisectFunction.apply(X, alpha, dim, n_iter, ensure_sum_one)
class EntmaxBisectFunction(Function):
@classmethod
def _gp(cls, x, alpha):
return x ** (alpha - 1)
@classmethod
def _gp_inv(cls, y, alpha):
return y ** (1 / (alpha - 1))
@classmethod
def _p(cls, X, alpha):
return cls._gp_inv(torch.clamp(X, min=0), alpha)
@classmethod
def forward(cls, ctx, X, alpha=1.5, dim=-1, n_iter=50, ensure_sum_one=True
):
if not isinstance(alpha, torch.Tensor):
alpha = torch.tensor(alpha, dtype=X.dtype, device=X.device)
alpha_shape = list(X.shape)
alpha_shape[dim] = 1
alpha = alpha.expand(*alpha_shape)
ctx.alpha = alpha
ctx.dim = dim
d = X.shape[dim]
X = X * (alpha - 1)
max_val, _ = X.max(dim=dim, keepdim=True)
tau_lo = max_val - cls._gp(1, alpha)
tau_hi = max_val - cls._gp(1 / d, alpha)
f_lo = cls._p(X - tau_lo, alpha).sum(dim) - 1
dm = tau_hi - tau_lo
for it in range(n_iter):
dm /= 2
tau_m = tau_lo + dm
p_m = cls._p(X - tau_m, alpha)
f_m = p_m.sum(dim) - 1
mask = (f_m * f_lo >= 0).unsqueeze(dim)
tau_lo = torch.where(mask, tau_m, tau_lo)
if ensure_sum_one:
p_m /= p_m.sum(dim=dim).unsqueeze(dim=dim)
ctx.save_for_backward(p_m)
return p_m
@classmethod
def backward(cls, ctx, dY):
Y, = ctx.saved_tensors
gppr = torch.where(Y > 0, Y ** (2 - ctx.alpha), Y.new_zeros(1))
dX = dY * gppr
q = dX.sum(ctx.dim) / gppr.sum(ctx.dim)
q = q.unsqueeze(ctx.dim)
dX -= q * gppr
d_alpha = None
if ctx.needs_input_grad[1]:
S = torch.where(Y > 0, Y * torch.log(Y), Y.new_zeros(1))
ent = S.sum(ctx.dim).unsqueeze(ctx.dim)
Y_skewed = gppr / gppr.sum(ctx.dim).unsqueeze(ctx.dim)
d_alpha = dY * (Y - Y_skewed) / (ctx.alpha - 1) ** 2
d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
d_alpha = d_alpha.sum(ctx.dim).unsqueeze(ctx.dim)
return dX, d_alpha, None, None, None
class Model(nn.Module):
def __init__(self, alpha=1.5, dim=-1, n_iter=50):
"""alpha-entmax: normalizing sparse map (a la softmax) via bisection.
Solves the optimization problem:
max_p <x, p> - H_a(p)
# ... truncated (>4000 chars) for memory efficiency |
ResNetV2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_4/inductor_cache/a2/ca2l7bjxfwrklzvcxfa2hnyzqh3p6neak37vi6fkugdhbu26fbpz.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
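# A hedged eager-mode reading of the index arithmetic above (an inference
# from the strides 49 = 7*7 and 147 = 3*7*7, not stated by the generated
# code): the kernel repacks a (256, 3, 7, 7) stem convolution weight so the
# input channel becomes the fastest-varying axis, i.e. a channels-last
# weight layout. Roughly:
def _stem_weight_to_channels_last_sketch(w):
    # w: conv weight assumed to have shape (out_channels=256, in_channels=3, 7, 7)
    return w.permute(0, 2, 3, 1).contiguous()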
# kernel path: runs/run_shard_4/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/co/ccosum7u5lx5fx5hf5opofiygxj2ntiq67yo5gfegevmhtkaru4r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/qg/cqg4z653mpzmif22rwtpmv42y4lbkkxhxjqguwoxl3wb6cn5fn7k.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ot/cotn5a2cqhwvdw4ugt6b2a4jl2ou2mh37mnmwxgwogdqw4kcufhp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1048576
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = (yindex // 1024)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (1024*x2) + (9216*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/7n/c7npfx4cng24bae4uqu2hpgblpis6j6mmnvhinuzjms74o3kespg.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4194304, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4194304
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2048
y1 = (yindex // 2048)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (2048*x2) + (18432*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ck/cck257ksuszi7rylew7fge2srrwr6phqjm3wbowg7merkmnxmshd.py
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# sqrt => sqrt
# sub => sub
# var_mean => var_mean
# w => div
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-10), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_6 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (147*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (147*x0)), tmp23, rmask & xmask)
''', device_str='cuda')
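# A hedged eager-mode equivalent of the fused kernel above (a sketch, not
# the generated code): Weight Standardization, exactly as the ATen graph
# fragment (var_mean with correction=0, sub, add 1e-10, sqrt, div) spells out.
def _weight_standardize_sketch(w, eps=1e-10):
    # Standardize each output filter over dims (1, 2, 3) to zero mean and
    # unit (biased) variance before it is consumed by the convolution.
    var, mean = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - mean) / torch.sqrt(var + eps)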
# kernel path: runs/run_shard_4/inductor_cache/nt/cntsn7m3r2xjxa4wdf4m34w7p5wrpvazgixessnri35z7hkzioyv.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# input_2 => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%convolution, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_7 = async_compile.triton('triton_poi_fused_constant_pad_nd_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1183744
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = (xindex // 8704) % 34
x1 = (xindex // 256) % 34
x3 = (xindex // 295936)
x4 = xindex % 8704
x6 = xindex
tmp0 = (-1) + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-8448) + x4 + (8192*x2) + (262144*x3)), tmp10, other=0.0)
tl.store(out_ptr0 + (x6), tmp11, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bf/cbf5dxwqyr3hqcx4qflqp4smyzjh74okbtbp3q4x6lxhqo5bx6kt.py
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = (xindex // 256) % 16
x2 = (xindex // 4096) % 16
x3 = (xindex // 65536)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp5 = tl.load(in_ptr0 + (8704 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp7 = tl.load(in_ptr0 + (8960 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp9 = tl.load(in_ptr0 + (9216 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp11 = tl.load(in_ptr0 + (17408 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp13 = tl.load(in_ptr0 + (17664 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp15 = tl.load(in_ptr0 + (17920 + x0 + (512*x1) + (17408*x2) + (295936*x3)), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, None)
tl.store(out_ptr1 + (x4), tmp41, None)
''', device_str='cuda')
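# A hedged eager-mode equivalent of the two stem kernels above (pad, then
# max pool), under the shapes the kernels assume (N=4, C=256, 32x32 input,
# channels-last layout):
def _stem_pool_sketch(x):
    import torch.nn.functional as F
    x = F.pad(x, (1, 1, 1, 1))  # constant_pad_nd with fill value 0.0
    # 3x3 window, stride 2: 34x34 padded -> 16x16, matching xnumel above.
    # Note: PyTorch returns flat argmax indices, whereas the generated
    # kernel stores a compact 0..8 window offset per output element.
    return F.max_pool2d(x, kernel_size=3, stride=2, return_indices=True)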
# kernel path: runs/run_shard_4/inductor_cache/v6/cv6xitrbrydxmqmrppdwzu4z4evlalp2yvk4c5vppkczm3j3ligg.py
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm => add_1, rsqrt, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_red_fused_native_group_norm_9 = async_compile.triton('triton_red_fused_native_group_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = (rindex // 8)
tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
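# The reduction above accumulates per-(sample, group) statistics with
# Welford's single-pass algorithm (the welford_reduce/welford helpers). A
# hedged plain-Python illustration of that update rule:
def _welford_sketch(values):
    mean, m2, n = 0.0, 0.0, 0
    for v in values:
        n += 1
        delta = v - mean
        mean += delta / n             # running mean
        m2 += delta * (v - mean)      # running sum of squared deviations
    return mean, m2 / n  # biased variance, combined with eps=1e-05 above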
# kernel path: runs/run_shard_4/inductor_cache/fa/cfasmvqd2wokec24hd5leef4rosb4wbws23u3moszdu6v4nwesae.py
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm => add_2, mul_1
# out => relu
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {})
triton_poi_fused_native_group_norm_relu_10 = async_compile.triton('triton_poi_fused_native_group_norm_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
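# A hedged eager-mode equivalent of the fused pointwise kernel above:
# normalize with the per-group mean/rsqrt computed by the previous kernel,
# apply the per-channel affine parameters, then ReLU. With 256 channels and
# 32 groups this gives 8 channels per group, matching the x0 // 8 indexing.
def _group_norm_relu_sketch(x, weight, bias, num_groups=32, eps=1e-05):
    import torch.nn.functional as F
    return F.relu(F.group_norm(x, num_groups, weight=weight, bias=bias, eps=eps))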
# kernel path: runs/run_shard_4/inductor_cache/7p/c7p6yuwjc2qmdlwhk2ntbb3enj6br3kuvouoikxqtnwnwwyibm2m.py
# Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_1 => add_3
# sqrt_1 => sqrt_1
# sub_1 => sub_2
# var_mean_1 => var_mean_2
# w_1 => div_1
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %getitem_7), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-10), kwargs = {})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_1), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_11 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1024, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1024
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/hy/chyilz3henugk2yh7bv56i2xnaqemzqpl6po2ro6wijtbh46vtdh.py
# Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_2 => add_4
# sqrt_2 => sqrt_2
# sub_2 => sub_3
# var_mean_2 => var_mean_3
# w_2 => div_2
# Graph fragment:
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_6, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_6, %getitem_9), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-10), kwargs = {})
# %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_4,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_3, %sqrt_2), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_12 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_12(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 256
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/3y/c3yxgcofabvzf5b3eonlvvlvp5hq7inszok367sxc2uhnt62smgv.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_2 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_9, %primals_10, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_13 = async_compile.triton('triton_poi_fused_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/fl/cflhmffcentzihxd2lm4f4c7jl6bncsabeh44cewr2fojudyc2k4.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_4 => add_10
# Graph fragment:
# %add_10 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_4, %convolution_1), kwargs = {})
triton_poi_fused_add_14 = async_compile.triton('triton_poi_fused_add_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
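# A hedged sketch of what the in-place kernel above realizes: the residual
# connection of the first block, summing the main branch with the projection
# shortcut (convolution_4 + convolution_1 in the graph fragment).
def _residual_add_sketch(branch_out, shortcut_out):
    return branch_out + shortcut_out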
# kernel path: runs/run_shard_4/inductor_cache/si/csiiojtlgokw2jw42ubk2xbgw5jlgcxayafs3q7zswsfedxizwoc.py
# Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_3 => add_11, rsqrt_3, var_mean_7
# Graph fragment:
# %var_mean_7 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_16, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_11,), kwargs = {})
triton_red_fused_native_group_norm_15 = async_compile.triton('triton_red_fused_native_group_norm_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (262144*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pq/cpqmt4ehbn6is274yejx4zty2dzaa7qy7b6yhnpuy3tx6223wuua.py
# Topologically Sorted Source Nodes: [group_norm_3, out_4], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_3 => add_12, mul_7
# out_4 => relu_3
# Graph fragment:
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, %unsqueeze_23), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_7, %unsqueeze_20), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_12,), kwargs = {})
triton_poi_fused_native_group_norm_relu_16 = async_compile.triton('triton_poi_fused_native_group_norm_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 262144)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
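# NOTE (editorial): a hedged eager-mode equivalent of the fused kernel above,
# which normalizes with the precomputed group statistics, applies the
# per-channel affine, and then takes ReLU. The library group_norm is used for
# brevity; the helper name is illustrative, not part of the generated code:
def _sketch_group_norm_relu(x, weight, bias, groups=32, eps=1e-05):
    return torch.relu(torch.nn.functional.group_norm(x, groups, weight, bias, eps))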
# kernel path: runs/run_shard_4/inductor_cache/rf/crfnkyefyns5oxfvn3xcagl2davuc4ecbrrklayhwc5y4v54ackv.py
# Topologically Sorted Source Nodes: [var_mean_4, sub_4, add_5, sqrt_4, w_4], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_5 => add_13
# sqrt_4 => sqrt_4
# sub_4 => sub_8
# var_mean_4 => var_mean_8
# w_4 => div_4
# Graph fragment:
# %var_mean_8 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_16, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_16, %getitem_19), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_18, 1e-10), kwargs = {})
# %sqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_13,), kwargs = {})
# %div_4 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_8, %sqrt_4), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_17 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 256
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
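# NOTE (editorial): the kernel above is weight standardization: each filter of
# primals_16 is centered and scaled by its own biased (correction=0) std over
# dims [1, 2, 3], with eps = 1e-10 inside the sqrt; in_out_ptr0 additionally
# keeps sqrt(var + eps) for reuse. A minimal eager sketch (illustrative name):
def _sketch_standardize_weight(w, eps=1e-10):
    var, mean = torch.var_mean(w, dim=[1, 2, 3], correction=0, keepdim=True)
    return (w - mean) / torch.sqrt(var + eps)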
# kernel path: runs/run_shard_4/inductor_cache/2i/c2iroi5tyq6bdv2nnl6vpicnvrvjqphovygeuprbla3wfoavamb7.py
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_5 => add_19
# Graph fragment:
# %add_19 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_7, %add_10), kwargs = {})
triton_poi_fused_add_18 = async_compile.triton('triton_poi_fused_add_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_18', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
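# NOTE (editorial): the kernel above is the fused residual connection for
# input_5: the skip branch is added elementwise into the convolution output,
# in place. A sketch with illustrative names:
def _sketch_residual_add(conv_out, shortcut):
    return conv_out + shortcut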
# kernel path: runs/run_shard_4/inductor_cache/6a/c6aj2cpzdg3dhbfhkkua4xiwtcie4yfjb4mh66com5kropbmjwni.py
# Topologically Sorted Source Nodes: [var_mean_10, sub_10, add_14, sqrt_10, w_10], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_14 => add_40
# sqrt_10 => sqrt_10
# sub_10 => sub_23
# var_mean_10 => var_mean_23
# w_10 => div_10
# Graph fragment:
# %var_mean_23 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_46, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_46, %getitem_49), kwargs = {})
# %add_40 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_48, 1e-10), kwargs = {})
# %sqrt_10 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_40,), kwargs = {})
# %div_10 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_23, %sqrt_10), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_19 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[2048, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 2048
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
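# NOTE (editorial): this kernel and triton_per_fused_add_div_sqrt_sub_var_mean_20
# below repeat the per-filter standardization of kernel 17 unchanged; only the
# filter count (xnumel: 2048 here, 512 below, 256 in kernel 17) and hence the
# launch grid differ.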
# kernel path: runs/run_shard_4/inductor_cache/o6/co6t5ttpyhicnphta2nl3sop7ipjogaapb57hy7c7z6oibxmwpd7.py
# Topologically Sorted Source Nodes: [var_mean_11, sub_11, add_15, sqrt_11, w_11], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_15 => add_41
# sqrt_11 => sqrt_11
# sub_11 => sub_24
# var_mean_11 => var_mean_24
# w_11 => div_11
# Graph fragment:
# %var_mean_24 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_47, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_47, %getitem_51), kwargs = {})
# %add_41 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_50, 1e-10), kwargs = {})
# %sqrt_11 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_41,), kwargs = {})
# %div_11 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_24, %sqrt_11), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_20 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_20', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_20(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/am/cam45fkuyx7dbg52ucmhl65jnrvcnlo7pyx6a2j5lvsd5bviopuj.py
# Topologically Sorted Source Nodes: [group_norm_13], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_13 => add_42, rsqrt_13, var_mean_25
# Graph fragment:
# %var_mean_25 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_26, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_42 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_52, 1e-05), kwargs = {})
# %rsqrt_13 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_42,), kwargs = {})
triton_red_fused_native_group_norm_21 = async_compile.triton('triton_red_fused_native_group_norm_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_21(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
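# NOTE (editorial): the reduction above is RBLOCK-tiled: each loop iteration
# folds one tile into running (mean, m2, weight) state via welford_reduce, and
# the partial lanes are merged afterwards by triton_helpers.welford. A scalar
# sketch of the standard pairwise Welford merge, for illustration only (the
# real helper works lane-wise on tensors, and n_a + n_b is assumed nonzero):
def _sketch_welford_combine(mean_a, m2_a, n_a, mean_b, m2_b, n_b):
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * (n_b / n)
    m2 = m2_a + m2_b + delta * delta * (n_a * n_b / n)
    return mean, m2, n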
# kernel path: runs/run_shard_4/inductor_cache/fy/cfy57czzc2qkjv6pjz77rgn32p5crruqb55nl4xixwalsbs5mwei.py
# Topologically Sorted Source Nodes: [group_norm_13, relu_13], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_13 => add_43, mul_27
# relu_13 => relu_13
# Graph fragment:
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_27, %unsqueeze_83), kwargs = {})
# %add_43 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_27, %unsqueeze_80), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_43,), kwargs = {})
triton_poi_fused_native_group_norm_relu_22 = async_compile.triton('triton_poi_fused_native_group_norm_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_22(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = (xindex // 131072)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/rn/crns2mhxlz3fmf3u6euptei4k75a2kpn32faj6chx5qgflid5c6b.py
# Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_18 => convolution_16
# Graph fragment:
# %convolution_16 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_13, %primals_50, %primals_51, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_23 = async_compile.triton('triton_poi_fused_convolution_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_23', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
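# NOTE (editorial): the strided convolution itself (out_18) runs as an extern
# kernel; the pointwise kernel above only folds the per-channel bias into the
# result, roughly as in this sketch (illustrative name):
def _sketch_conv_bias(y, bias):
    # y: (N, C, H, W) convolution output without bias; bias: (C,)
    return y + bias.view(1, -1, 1, 1)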
# kernel path: runs/run_shard_4/inductor_cache/7q/c7qjvpiszscw2ipcio4ffhrmgfbubit4radh6syg4zqvckqbz52z.py
# Topologically Sorted Source Nodes: [group_norm_14], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_14 => add_44, rsqrt_14, var_mean_26
# Graph fragment:
# %var_mean_26 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_44 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_54, 1e-05), kwargs = {})
# %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_44,), kwargs = {})
triton_per_fused_native_group_norm_24 = async_compile.triton('triton_per_fused_native_group_norm_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_24', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_24(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = (rindex // 16)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
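# NOTE (editorial): here the whole 1024-element group fits in one RBLOCK, so
# the reduction is persistent: a single load per program, then two tl.sum
# passes (mean, then squared deviations) instead of the looped Welford update
# used by the RBLOCK-tiled kernels above.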
# kernel path: runs/run_shard_4/inductor_cache/ad/cadjzg3pzepovud42t5i6gbftlolabw3fp27unoow5hoyvjliyaa.py
# Topologically Sorted Source Nodes: [group_norm_14, relu_14], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_14 => add_45, mul_29
# relu_14 => relu_14
# Graph fragment:
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_29, %unsqueeze_89), kwargs = {})
# %add_45 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_86), kwargs = {})
# %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_45,), kwargs = {})
triton_poi_fused_native_group_norm_relu_25 = async_compile.triton('triton_poi_fused_native_group_norm_relu_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_25(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/mr/cmru36jvazn4nvftmbz3p4esd55x5bjsn5arm2fs34ru32eusb5g.py
# Topologically Sorted Source Nodes: [var_mean_12, sub_12, add_16, sqrt_12, w_12], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_16 => add_46
# sqrt_12 => sqrt_12
# sub_12 => sub_27
# var_mean_12 => var_mean_27
# w_12 => div_12
# Graph fragment:
# %var_mean_27 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_54, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_54, %getitem_57), kwargs = {})
# %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_56, 1e-10), kwargs = {})
# %sqrt_12 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_46,), kwargs = {})
# %div_12 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_27, %sqrt_12), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_26 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[2048, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_26', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_26(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 2048
XBLOCK: tl.constexpr = 1
rnumel = 512
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (512*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (512*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/2c/c2c52oqydx2qqnye3ms2nhtnjykxyv6xutsiklmey66ax3u3kxre.py
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_8 => add_47
# Graph fragment:
# %add_47 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_17, %convolution_14), kwargs = {})
triton_poi_fused_add_27 = async_compile.triton('triton_poi_fused_add_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bz/cbzasy4ze2re4vldncooexegruzzqnk6h26e2lq65kivzh26krpx.py
# Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_15 => add_48, rsqrt_15, var_mean_28
# Graph fragment:
# %var_mean_28 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_58, 1e-05), kwargs = {})
# %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_48,), kwargs = {})
triton_red_fused_native_group_norm_28 = async_compile.triton('triton_red_fused_native_group_norm_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ai/caiz2ipqaqy3yvfxd23r6kgede3xfyove4vfzoze622iyk6yxnob.py
# Topologically Sorted Source Nodes: [group_norm_15, out_20], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_15 => add_49, mul_31
# out_20 => relu_15
# Graph fragment:
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %unsqueeze_95), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %unsqueeze_92), kwargs = {})
# %relu_15 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_49,), kwargs = {})
triton_poi_fused_native_group_norm_relu_29 = async_compile.triton('triton_poi_fused_native_group_norm_relu_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 131072)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
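# NOTE (editorial): with 2048 channels in 32 groups, each group spans 64
# channels, so `(32*x2) + (x0 // 64)` selects the (sample x2, group x0 // 64)
# statistics slot; the same indexing pattern appears above with 16- and
# 32-channel groups.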
# kernel path: runs/run_shard_4/inductor_cache/q7/cq7qfvsx4rl6ppxrincqirqseiykkhanw5f6ciztpudikgkuztk7.py
# Topologically Sorted Source Nodes: [var_mean_13, sub_13, add_18, sqrt_13, w_13], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_18 => add_50
# sqrt_13 => sqrt_13
# sub_13 => sub_29
# var_mean_13 => var_mean_29
# w_13 => div_13
# Graph fragment:
# %var_mean_29 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_57, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_29 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_57, %getitem_61), kwargs = {})
# %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_60, 1e-10), kwargs = {})
# %sqrt_13 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_50,), kwargs = {})
# %div_13 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_29, %sqrt_13), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_30 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[512, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
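# NOTE (editorial): at 2048 elements per filter the standardization becomes
# two R-loops: the first accumulates Welford statistics and stores
# sqrt(var + eps), the second re-reads the weights and writes the normalized
# values, trading a second pass over memory for bounded register use.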
# kernel path: runs/run_shard_4/inductor_cache/46/c46uzwtkm72q554z66kfflo66d3jxej542mlrkn5kejeivkb77fl.py
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_9 => add_56
# Graph fragment:
# %add_56 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_20, %add_47), kwargs = {})
triton_poi_fused_add_31 = async_compile.triton('triton_poi_fused_add_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_31', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_31(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/kt/cktrt7d4uohqooff3hbktlyw3jxflx7srzvymlnrkdul5brzhp4v.py
# Topologically Sorted Source Nodes: [var_mean_19, sub_19, add_27, sqrt_19, w_19], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_27 => add_77
# sqrt_19 => sqrt_19
# sub_19 => sub_44
# var_mean_19 => var_mean_44
# w_19 => div_19
# Graph fragment:
# %var_mean_44 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_87, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_87, %getitem_91), kwargs = {})
# %add_77 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_90, 1e-10), kwargs = {})
# %sqrt_19 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_77,), kwargs = {})
# %div_19 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_44, %sqrt_19), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_32 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4096, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_32', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4096
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask)
''', device_str='cuda')
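# NOTE (editorial): same two-pass standardization as kernel 30, but with a
# constant-true x-mask: xnumel = 4096 is presumably provable to be covered
# exactly by every candidate launch grid, so the x-bounds checks present in
# kernels 30 and 33 are dropped here.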
# kernel path: runs/run_shard_4/inductor_cache/2p/c2pa75h6vcympkonevngx7jgya3va4wrjf32c2j6odcqubxn55kq.py
# Topologically Sorted Source Nodes: [var_mean_20, sub_20, add_28, sqrt_20, w_20], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_28 => add_78
# sqrt_20 => sqrt_20
# sub_20 => sub_45
# var_mean_20 => var_mean_45
# w_20 => div_20
# Graph fragment:
# %var_mean_45 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_88, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_88, %getitem_93), kwargs = {})
# %add_78 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_92, 1e-10), kwargs = {})
# %sqrt_20 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_78,), kwargs = {})
# %div_20 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_45, %sqrt_20), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_33 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_33', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_33(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
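    # second pass (comment added for clarity, not generated): re-read the row
    # and normalize with the mean (tmp2) and std (tmp9) from the Welford loop above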
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
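# --- Reference sketch (not part of the generated output): eager-mode
# equivalent of the fused weight-standardization kernel above. `w` stands in
# for a conv weight such as primals_88 with 1024 output channels and 2048
# elements per channel (rnumel); the name and shapes are illustrative.
def _weight_standardize_reference(w, eps=1e-10):
    # population variance over dims [1, 2, 3], matching
    # aten.var_mean.correction(..., correction: 0) in the graph fragment above
    var, mean = torch.var_mean(w, dim=[1, 2, 3], unbiased=False, keepdim=True)
    # the sub / add / sqrt / div chain that the kernel fuses into two row passes
    return (w - mean) / torch.sqrt(var + eps)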
# kernel path: runs/run_shard_4/inductor_cache/pk/cpkn7dyjdvgjdsd22adrasuzoh2m2mcmlbhqoa355to4spbmfc6o.py
# Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_25 => add_79, rsqrt_25, var_mean_46
# Graph fragment:
# %var_mean_46 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_50, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_79 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_94, 1e-05), kwargs = {})
# %rsqrt_25 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_79,), kwargs = {})
triton_red_fused_native_group_norm_34 = async_compile.triton('triton_red_fused_native_group_norm_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_34(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
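# --- Reference sketch (not part of the generated output): what the Welford
# loop above computes per program instance. xnumel = 128 corresponds to
# batch * num_groups rows; out_ptr0/out_ptr1 receive the group mean and the
# raw M2 (sum of squared deviations), out_ptr2 the rsqrt of the variance.
def _group_norm_stats_reference(x, num_groups=32, eps=1e-05):
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)                 # one row per (batch, group)
    var, mean = torch.var_mean(g, dim=2, unbiased=False)
    m2 = var * g.shape[2]                            # kernel stores M2, not var
    rstd = torch.rsqrt(var + eps)                    # matches rsqrt(m2/2048 + eps)
    return mean, m2, rstd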
# kernel path: runs/run_shard_4/inductor_cache/do/cdosiy5sv6smtosgbknn2mualigm45xk4rnnmiei2dbhnr6jbcyh.py
# Topologically Sorted Source Nodes: [group_norm_25, relu_25], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_25 => add_80, mul_51
# relu_25 => relu_25
# Graph fragment:
# %mul_51 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_51, %unsqueeze_155), kwargs = {})
# %add_80 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_51, %unsqueeze_152), kwargs = {})
# %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_80,), kwargs = {})
triton_poi_fused_native_group_norm_relu_35 = async_compile.triton('triton_poi_fused_native_group_norm_relu_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
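# --- Reference sketch (not part of the generated output): the pointwise
# epilogue above in eager form. mean/m2 come from the reduction kernel
# (in_ptr1/in_ptr2); weight/bias are the per-channel affine parameters
# (in_ptr3/in_ptr4). Tensors are assumed already broadcast to x's shape.
def _group_norm_relu_reference(x, mean, m2, weight, bias, group_size=2048.0, eps=1e-05):
    rstd = torch.rsqrt(m2 / group_size + eps)        # recomputed per element, as above
    return torch.relu((x - mean) * rstd * weight + bias)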
# kernel path: runs/run_shard_4/inductor_cache/3e/c3edz2jlgd7x3ojnazo34vks4lvi36gew45ffjv4j6xacqlcirpz.py
# Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_34 => convolution_29
# Graph fragment:
# %convolution_29 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_25, %primals_91, %primals_92, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_36 = async_compile.triton('triton_poi_fused_convolution_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_36', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
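# --- Reference sketch (not part of the generated output): the kernel above
# only folds the conv bias in after the extern convolution call; xindex % 1024
# selects the channel because the activation buffer is channels-last here.
def _add_conv_bias_reference(out_nhwc, bias):
    # broadcast the (C,) bias over the trailing channel axis, in place,
    # mirroring the in_out_ptr0 load/add/store pattern
    return out_nhwc.add_(bias)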
# kernel path: runs/run_shard_4/inductor_cache/u5/cu5qbvq7rhsj5qzjpgn4u3l7m2flsdl6jv6rsorhhef7z7t5rpeu.py
# Topologically Sorted Source Nodes: [group_norm_26], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_26 => add_81, rsqrt_26, var_mean_47
# Graph fragment:
# %var_mean_47 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_52, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_81 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_96, 1e-05), kwargs = {})
# %rsqrt_26 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_81,), kwargs = {})
triton_per_fused_native_group_norm_37 = async_compile.triton('triton_per_fused_native_group_norm_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_37', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_37(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 512
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = (rindex // 32)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (16384*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
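# --- Note (not part of the generated output): this is a persistent reduction.
# RBLOCK == rnumel == 512, so each program loads its whole row once and uses
# tl.sum twice (mean, then M2) instead of the Welford loop used when the row
# exceeds the block size. Per-row sketch of the same two-sum computation:
def _row_stats_reference(row, eps=1e-05):
    mean = row.sum() / row.numel()
    m2 = ((row - mean) ** 2).sum()                   # stored raw, like out_ptr1
    return mean, m2, torch.rsqrt(m2 / row.numel() + eps)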
# kernel path: runs/run_shard_4/inductor_cache/ne/cne7llmjajc2ac624wbhyotx4of4ba24zgfkmy5mj6hluta3oajy.py
# Topologically Sorted Source Nodes: [group_norm_26, relu_26], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_26 => add_82, mul_53
# relu_26 => relu_26
# Graph fragment:
# %mul_53 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_53, %unsqueeze_161), kwargs = {})
# %add_82 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_53, %unsqueeze_158), kwargs = {})
# %relu_26 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_82,), kwargs = {})
triton_poi_fused_native_group_norm_relu_38 = async_compile.triton('triton_poi_fused_native_group_norm_relu_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_38', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_38(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 16384)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/pw/cpwuyihukeko3sz2mange4jiyuc6hegivng7t6xlmjwzff2jslfm.py
# Topologically Sorted Source Nodes: [var_mean_21, sub_21, add_29, sqrt_21, w_21], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_29 => add_83
# sqrt_21 => sqrt_21
# sub_21 => sub_48
# var_mean_21 => var_mean_48
# w_21 => div_21
# Graph fragment:
# %var_mean_48 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_95, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_95, %getitem_99), kwargs = {})
# %add_83 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_98, 1e-10), kwargs = {})
# %sqrt_21 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_83,), kwargs = {})
# %div_21 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_48, %sqrt_21), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_39 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_39', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_39(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 4096
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
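# --- Note (not part of the generated output): same weight standardization as
# the looped kernel triton_red_fused_add_div_sqrt_sub_var_mean_33 above, but
# persistent: with RBLOCK == rnumel == 1024 the row stays resident in
# registers, so the kernel normalizes in the same pass instead of re-reading
# in_ptr0. One-pass sketch over a resident row:
def _standardize_resident_row(row, eps=1e-10):
    mean = row.sum() / row.numel()
    std = torch.sqrt(((row - mean) ** 2).sum() / row.numel() + eps)
    return (row - mean) / std, std                   # both outputs are stored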
# kernel path: runs/run_shard_4/inductor_cache/kt/cktxvckhfax7pjim22s6n5uzucjshabg76bthqfv5bt7xvx2rg7v.py
# Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_12 => add_84
# Graph fragment:
# %add_84 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_30, %convolution_27), kwargs = {})
triton_poi_fused_add_40 = async_compile.triton('triton_poi_fused_add_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_40', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_40(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
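# --- Reference sketch (not part of the generated output): the residual add
# for input_12 (convolution_30 + convolution_27). One of the two conv output
# buffers is reused in place for the sum:
def _residual_add_reference(acc, other):
    return acc.add_(other)                           # load both, add, store back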
# kernel path: runs/run_shard_4/inductor_cache/3z/c3zgsvchcnqibeqy5jdmcbb664t2dc7anoixlyfsbcbmr7q6etbj.py
# Topologically Sorted Source Nodes: [group_norm_27], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_27 => add_85, rsqrt_27, var_mean_49
# Graph fragment:
# %var_mean_49 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_54, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_85 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_100, 1e-05), kwargs = {})
# %rsqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_85,), kwargs = {})
triton_red_fused_native_group_norm_41 = async_compile.triton('triton_red_fused_native_group_norm_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = (rindex // 128)
tmp0 = tl.load(in_ptr0 + (r2 + (128*x0) + (4096*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
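# --- Note (not part of the generated output): the flat offset
# r2 + 128*x0 + 4096*r3 + 65536*x1 above walks a channels-last activation of
# shape (batch, 16 spatial positions, 4096 channels): x1 = batch, x0 = group
# of 128 channels, r3 = spatial position, r2 = channel within the group.
# Equivalent slice with plain indexing (shapes inferred, not from the graph):
def _group_slice_channels_last(x, batch, group, channels_per_group=128):
    # x: (N, H*W, C); returns the (H*W, channels_per_group) block whose
    # mean/variance this program instance reduces
    c0 = group * channels_per_group
    return x[batch, :, c0:c0 + channels_per_group]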
# kernel path: runs/run_shard_4/inductor_cache/5j/c5jagcpyiyis7hpqnt5po5ouog6tiegjmrxnuvik36qafxw6ph47.py
# Topologically Sorted Source Nodes: [group_norm_27, out_36], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_27 => add_86, mul_55
# out_36 => relu_27
# Graph fragment:
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_55, %unsqueeze_167), kwargs = {})
# %add_86 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_55, %unsqueeze_164), kwargs = {})
# %relu_27 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_86,), kwargs = {})
triton_poi_fused_native_group_norm_relu_42 = async_compile.triton('triton_poi_fused_native_group_norm_relu_42', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_42(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/5c/c5cnjqjnvm5kgsecw54s3qbswpzb3ndbzzivmfbmbr5ek3n7tfho.py
# Topologically Sorted Source Nodes: [var_mean_22, sub_22, add_31, sqrt_22, w_22], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_31 => add_87
# sqrt_22 => sqrt_22
# sub_22 => sub_50
# var_mean_22 => var_mean_50
# w_22 => div_22
# Graph fragment:
# %var_mean_50 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_98, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_50 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_98, %getitem_103), kwargs = {})
# %add_87 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_102, 1e-10), kwargs = {})
# %sqrt_22 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_87,), kwargs = {})
# %div_22 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_50, %sqrt_22), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_43 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_43', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_43', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/dl/cdlkdgo2g6uuwzzliq3ols47a2pt4qrqa2hxzxqniidji2r3opyh.py
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_13 => add_93
# Graph fragment:
# %add_93 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_33, %add_84), kwargs = {})
triton_poi_fused_add_44 = async_compile.triton('triton_poi_fused_add_44', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_44', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_44(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/p2/cp2pkyyfmqpfiixnv3jjjvia7zvprrskyyin2oz4s5h4td3uobbn.py
# Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_40, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_40 => add_114
# sqrt_28 => sqrt_28
# sub_28 => sub_65
# var_mean_28 => var_mean_65
# w_28 => div_28
# Graph fragment:
# %var_mean_65 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_128, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_65 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_128, %getitem_133), kwargs = {})
# %add_114 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_132, 1e-10), kwargs = {})
# %sqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_114,), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_65, %sqrt_28), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_45 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_45', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[8192, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_45', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_45(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 8192
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask)
''', device_str='cuda')
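# --- Note (not part of the generated output): unlike
# triton_red_fused_add_div_sqrt_sub_var_mean_33 above, xmask here is a
# constant True tensor -- xnumel (8192) is a power of two, divisible by every
# candidate XBLOCK, so the bounds check on the x axis is provably redundant
# and only rmask survives. Illustrative predicate for when a mask is needed:
def _needs_xmask(xnumel, xblock):
    return xnumel % xblock != 0                      # last block would overrun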
# kernel path: runs/run_shard_4/inductor_cache/7v/c7vndo53sxch2xfxk2j225jshkuji44jqxb3kasd35ift5bl4cqu.py
# Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_41, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_41 => add_115
# sqrt_29 => sqrt_29
# sub_29 => sub_66
# var_mean_29 => var_mean_66
# w_29 => div_29
# Graph fragment:
# %var_mean_66 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_129, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_66 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_129, %getitem_135), kwargs = {})
# %add_115 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_134, 1e-10), kwargs = {})
# %sqrt_29 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_115,), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_66, %sqrt_29), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_46 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_46', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[2048, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_46', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_46(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 2048
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4m7vudmm3cnmrdoq6nrxav3dfryhniaztwf56ak3xwbsoexd6j.py
# Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_37 => add_116, rsqrt_37, var_mean_67
# Graph fragment:
# %var_mean_67 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_74, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_116 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_136, 1e-05), kwargs = {})
# %rsqrt_37 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_116,), kwargs = {})
triton_per_fused_native_group_norm_47 = async_compile.triton('triton_per_fused_native_group_norm_47', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_47', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_47(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = (rindex // 64)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/c4/cc4yjngdidzi6e5bjkxxkejgcpj7dbiephcer54rlmajddthlte7.py
# Topologically Sorted Source Nodes: [group_norm_37, relu_37], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_37 => add_117, mul_75
# relu_37 => relu_37
# Graph fragment:
# %mul_75 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_75, %unsqueeze_227), kwargs = {})
# %add_117 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_75, %unsqueeze_224), kwargs = {})
# %relu_37 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_117,), kwargs = {})
triton_poi_fused_native_group_norm_relu_48 = async_compile.triton('triton_poi_fused_native_group_norm_relu_48', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_48', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_48(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/bg/cbgnzftadm3ygxe6jr4nyqfszukghqcdcun5ko2zoekytzrl7ze7.py
# Topologically Sorted Source Nodes: [out_50], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_50 => convolution_42
# Graph fragment:
# %convolution_42 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_37, %primals_132, %primals_133, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_49 = async_compile.triton('triton_poi_fused_convolution_49', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_49', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_49(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/lc/clcxsrztfooasql5b6o2fhwxukcgbzvvpsfy34xamrihxhotepnv.py
# Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_38 => add_118, rsqrt_38, var_mean_68
# Graph fragment:
# %var_mean_68 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_76, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_118 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_138, 1e-05), kwargs = {})
# %rsqrt_38 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_118,), kwargs = {})
triton_per_fused_native_group_norm_50 = async_compile.triton('triton_per_fused_native_group_norm_50', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_50', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_50(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = (rindex // 64)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (8192*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
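# Hedged reference for the persistent reduction above: per (batch, group) it
# produces the mean, the raw sum of squared deviations (out_ptr1 is stored
# before the divide by the group size), and rsqrt(var + eps). num_groups=32
# and eps=1e-05 come from the graph fragment; the helper and its simple NCHW
# reshape are illustrative only (the kernel itself indexes a channels-last
# layout, but the statistics are the same).
def _group_norm_stats_sketch(x, num_groups=32, eps=1e-05):
    n = x.shape[0]
    g = x.reshape(n, num_groups, -1)             # 256 elements per group here
    mean = g.mean(dim=2)
    m2 = ((g - mean[:, :, None]) ** 2).sum(dim=2)
    rstd = (m2 / g.shape[2] + eps).rsqrt()
    return mean, m2, rstd                        # out_ptr0, out_ptr1, out_ptr2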
# kernel path: runs/run_shard_4/inductor_cache/76/c76lb5dgw7uy4lpt3ebksnkexpk5a4mcifrw43rrzrmwnhrksd6u.py
# Topologically Sorted Source Nodes: [group_norm_38, relu_38], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_38 => add_119, mul_77
# relu_38 => relu_38
# Graph fragment:
# %mul_77 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_77, %unsqueeze_233), kwargs = {})
# %add_119 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_77, %unsqueeze_230), kwargs = {})
# %relu_38 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_119,), kwargs = {})
triton_poi_fused_native_group_norm_relu_51 = async_compile.triton('triton_poi_fused_native_group_norm_relu_51', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_51', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_51(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 8192)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
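# Hedged sketch of the pointwise epilogue above (uses the module-level torch
# import): it finishes the normalization from the raw sum of squared
# deviations (the 256.0 divisor and recomputed rsqrt mirror tmp4..tmp8),
# applies the per-channel affine, then ReLU. group_size=64 reflects this
# specific launch; the helper name is illustrative.
def _group_norm_relu_sketch(x, mean, m2, weight, bias, group_size=64, eps=1e-05):
    rstd = (m2 / 256.0 + eps).rsqrt()            # mean, m2: (N, num_groups)
    c = torch.arange(x.shape[1], device=x.device) // group_size
    xhat = (x - mean[:, c, None, None]) * rstd[:, c, None, None]
    return torch.relu(xhat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1))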
# kernel path: runs/run_shard_4/inductor_cache/a4/ca4jkajxllucjxjlawcrmgbqoc3gf3uabc23n74mijvijx6un6v4.py
# Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_42, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_42 => add_120
# sqrt_30 => sqrt_30
# sub_30 => sub_69
# var_mean_30 => var_mean_69
# w_30 => div_30
# Graph fragment:
# %var_mean_69 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_136, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_69 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_136, %getitem_141), kwargs = {})
# %add_120 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_140, 1e-10), kwargs = {})
# %sqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_120,), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_69, %sqrt_30), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_52 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_52', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[8192, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_52', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_52(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 8192
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask)
''', device_str='cuda')
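# Hedged eager equivalent of the two-pass reduction above, i.e. Weight
# Standardization of a conv filter bank: per output filter, subtract the mean
# and divide by sqrt(biased_var + 1e-10). correction=0 matches the graph's
# aten.var_mean.correction call; the helper name is illustrative and relies on
# the module-level torch import.
def _standardize_weight_sketch(w, eps=1e-10):
    # w: (out_ch, in_ch, kh, kw); reduce over everything but the filter axis
    var, mean = torch.var_mean(w, dim=[1, 2, 3], correction=0, keepdim=True)
    return (w - mean) / torch.sqrt(var + eps)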
# kernel path: runs/run_shard_4/inductor_cache/tr/ctrvbts2mcg36tlsxhn7jg7xen2u6a7kd7u4n3mosznsb6fa2wyo.py
# Topologically Sorted Source Nodes: [input_16], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_16 => add_121
# Graph fragment:
# %add_121 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_43, %convolution_40), kwargs = {})
triton_poi_fused_add_53 = async_compile.triton('triton_poi_fused_add_53', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_53', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_53(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_out_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
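# Minimal hedged sketch of the residual merge above: one convolution branch
# (in_ptr0) is accumulated into the other in place, mirroring
# mutated_arg_names=['in_out_ptr0'] so no extra output buffer is allocated.
def _residual_add_sketch(main, shortcut):
    main += shortcut
    return main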
# kernel path: runs/run_shard_4/inductor_cache/bm/cbmg74gizsfkitta6qalg37yo25qnqf63c5jyevmzhqmxnjx7xom.py
# Topologically Sorted Source Nodes: [group_norm_39], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_39 => add_122, rsqrt_39, var_mean_70
# Graph fragment:
# %var_mean_70 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_78, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_122 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_142, 1e-05), kwargs = {})
# %rsqrt_39 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_122,), kwargs = {})
triton_per_fused_native_group_norm_54 = async_compile.triton('triton_per_fused_native_group_norm_54', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_54', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_54(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = (rindex // 256)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (256*x0) + (8192*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/gz/cgzyjy3o2gqoit7amj557zg7toflqyj43kjgzl3ej62i7hk6lnee.py
# Topologically Sorted Source Nodes: [group_norm_39, out_52], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_39 => add_123, mul_79
# out_52 => relu_39
# Graph fragment:
# %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_79, %unsqueeze_239), kwargs = {})
# %add_123 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_79, %unsqueeze_236), kwargs = {})
# %relu_39 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_123,), kwargs = {})
triton_poi_fused_native_group_norm_relu_55 = async_compile.triton('triton_poi_fused_native_group_norm_relu_55', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_55', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_55(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 8192
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 256)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 256)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/aj/cajj5ki6azp2kbx2jxbslibq6dzfyc3ipjv2qjovureewr6dmplt.py
# Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_44, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_44 => add_124
# sqrt_31 => sqrt_31
# sub_31 => sub_71
# var_mean_31 => var_mean_71
# w_31 => div_31
# Graph fragment:
# %var_mean_71 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_139, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_71 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_139, %getitem_145), kwargs = {})
# %add_124 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_144, 1e-10), kwargs = {})
# %sqrt_31 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_124,), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_71, %sqrt_31), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_56 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_56', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[2048, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_56', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_56(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 2048
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (8192*x0)), rmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (8192*x0)), tmp12, rmask)
''', device_str='cuda')
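# The rnumel=8192 reduction above streams RBLOCK-sized tiles through
# triton_helpers.welford_reduce rather than summing naively. A hedged scalar
# sketch of that update rule (Welford's algorithm; the final biased variance
# matches correction=0):
def _welford_sketch(values):
    mean, m2, count = 0.0, 0.0, 0
    for v in values:
        count += 1
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)                 # running sum of squared devs
    return mean, m2 / count                      # mean, biased variance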
# kernel path: runs/run_shard_4/inductor_cache/ff/cff7brpmobqi2vug2ta24kdr3loqrvuixh2kh2dnci64fj23ijsj.py
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.add]
# Source node to ATen node mapping:
# input_17 => add_130
# Graph fragment:
# %add_130 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_46, %add_121), kwargs = {})
triton_poi_fused_add_57 = async_compile.triton('triton_poi_fused_add_57', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_57', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_57(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr0 + (x0), None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/sz/cszq7gbtdxzwt3qzvghesh6nlz5v2y6sbgs46237zpvjscw4bbk4.py
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_20 => add_149, rsqrt_48, var_mean_85
# Graph fragment:
# %var_mean_85 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_96, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_149 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_172, 1e-05), kwargs = {})
# %rsqrt_48 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_149,), kwargs = {})
triton_per_fused_native_group_norm_58 = async_compile.triton('triton_per_fused_native_group_norm_58', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_58', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_58(in_out_ptr0, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = (rindex // 256)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (256*x0) + (8192*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_4/inductor_cache/ho/chohdgeexcik3iyqkflbyj2cmb4wqtijhbp72tnfgmvhausg25tu.py
# Topologically Sorted Source Nodes: [input_20, input_21, input_22], Original ATen: [aten.native_group_norm, aten.relu, aten.mean]
# Source node to ATen node mapping:
# input_20 => add_150, mul_97
# input_21 => relu_48
# input_22 => mean
# Graph fragment:
# %mul_97 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_97, %unsqueeze_293), kwargs = {})
# %add_150 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_97, %unsqueeze_290), kwargs = {})
# %relu_48 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%add_150,), kwargs = {})
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%relu_48, [-1, -2], True), kwargs = {})
triton_poi_fused_mean_native_group_norm_relu_59 = async_compile.triton('triton_poi_fused_mean_native_group_norm_relu_59', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_native_group_norm_relu_59', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_native_group_norm_relu_59(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8192
x1 = (xindex // 8192)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (32768*x1)), None)
tmp1 = tl.load(in_ptr1 + ((x2 // 256)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((x2 // 256)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8192 + x0 + (32768*x1)), None)
tmp18 = tl.load(in_ptr0 + (16384 + x0 + (32768*x1)), None)
tmp25 = tl.load(in_ptr0 + (24576 + x0 + (32768*x1)), None)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = tmp11 - tmp1
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp5
tmp15 = tmp14 + tmp7
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp17 = tmp10 + tmp16
tmp19 = tmp18 - tmp1
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp7
tmp23 = triton_helpers.maximum(tmp9, tmp22)
tmp24 = tmp17 + tmp23
tmp26 = tmp25 - tmp1
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp5
tmp29 = tmp28 + tmp7
tmp30 = triton_helpers.maximum(tmp9, tmp29)
tmp31 = tmp24 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.store(out_ptr0 + (x2), tmp33, None)
''', device_str='cuda')
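# Hedged sketch of the fused epilogue above (uses the module-level torch
# import): normalize with the precomputed per-group statistics (kernel 58
# already stored rsqrt in place, so in_ptr2 is multiplied directly), apply the
# affine and ReLU, then average the four spatial positions of the 2x2 map.
# mean_c/rstd_c are assumed pre-expanded to one value per channel.
def _gn_relu_gap_sketch(x, mean_c, rstd_c, weight, bias):
    # x: (N, 8192, 2, 2)
    xhat = (x - mean_c[:, :, None, None]) * rstd_c[:, :, None, None]
    y = torch.relu(xhat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1))
    return y.mean(dim=(-1, -2), keepdim=True)    # matches aten.mean.dim above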
# kernel path: runs/run_shard_4/inductor_cache/sw/csw2vezjin4bvf6fhnzk3tefnt5ol3gapui4lrf37dd4xtvyd7fq.py
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# input_23 => convolution_53
# Graph fragment:
# %convolution_53 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_169, %primals_170, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_60 = async_compile.triton('triton_poi_fused_convolution_60', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_60', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_60(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 87372
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21843
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
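# Hedged sketch of the classifier-head bias add above. Unlike the earlier
# pointwise kernels, xnumel = 4 * 21843 = 87372 is not a multiple of the
# launch block size, so the Triton code guards every load/store with xmask;
# the eager equivalent needs no mask. The helper name is illustrative.
def _head_bias_add_sketch(logits, bias):
    # logits: (N, 21843, 1, 1) from the final 1x1 convolution; bias: (21843,)
    return logits + bias.view(1, -1, 1, 1)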
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170 = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, ), (1, ))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (256, ), (1, ))
assert_size_stride(primals_8, (256, ), (1, ))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256, ), (1, ))
assert_size_stride(primals_11, (256, ), (1, ))
assert_size_stride(primals_12, (256, ), (1, ))
assert_size_stride(primals_13, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_14, (1024, ), (1, ))
assert_size_stride(primals_15, (1024, ), (1, ))
assert_size_stride(primals_16, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (256, ), (1, ))
assert_size_stride(primals_19, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (256, ), (1, ))
assert_size_stride(primals_21, (256, ), (1, ))
assert_size_stride(primals_22, (256, ), (1, ))
assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_24, (1024, ), (1, ))
assert_size_stride(primals_25, (1024, ), (1, ))
assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_27, (256, ), (1, ))
assert_size_stride(primals_28, (256, ), (1, ))
assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (256, ), (1, ))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (256, ), (1, ))
assert_size_stride(primals_33, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_34, (1024, ), (1, ))
assert_size_stride(primals_35, (1024, ), (1, ))
assert_size_stride(primals_36, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_37, (256, ), (1, ))
assert_size_stride(primals_38, (256, ), (1, ))
assert_size_stride(primals_39, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_40, (256, ), (1, ))
assert_size_stride(primals_41, (256, ), (1, ))
assert_size_stride(primals_42, (256, ), (1, ))
assert_size_stride(primals_43, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_44, (1024, ), (1, ))
assert_size_stride(primals_45, (1024, ), (1, ))
assert_size_stride(primals_46, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_48, (512, ), (1, ))
assert_size_stride(primals_49, (512, ), (1, ))
assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_51, (512, ), (1, ))
assert_size_stride(primals_52, (512, ), (1, ))
assert_size_stride(primals_53, (512, ), (1, ))
assert_size_stride(primals_54, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_55, (2048, ), (1, ))
assert_size_stride(primals_56, (2048, ), (1, ))
assert_size_stride(primals_57, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_58, (512, ), (1, ))
assert_size_stride(primals_59, (512, ), (1, ))
assert_size_stride(primals_60, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_61, (512, ), (1, ))
assert_size_stride(primals_62, (512, ), (1, ))
assert_size_stride(primals_63, (512, ), (1, ))
assert_size_stride(primals_64, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_65, (2048, ), (1, ))
assert_size_stride(primals_66, (2048, ), (1, ))
assert_size_stride(primals_67, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_68, (512, ), (1, ))
assert_size_stride(primals_69, (512, ), (1, ))
assert_size_stride(primals_70, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_71, (512, ), (1, ))
assert_size_stride(primals_72, (512, ), (1, ))
assert_size_stride(primals_73, (512, ), (1, ))
assert_size_stride(primals_74, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_75, (2048, ), (1, ))
assert_size_stride(primals_76, (2048, ), (1, ))
assert_size_stride(primals_77, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_78, (512, ), (1, ))
assert_size_stride(primals_79, (512, ), (1, ))
assert_size_stride(primals_80, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_81, (512, ), (1, ))
assert_size_stride(primals_82, (512, ), (1, ))
assert_size_stride(primals_83, (512, ), (1, ))
assert_size_stride(primals_84, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_85, (2048, ), (1, ))
assert_size_stride(primals_86, (2048, ), (1, ))
assert_size_stride(primals_87, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_88, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_89, (1024, ), (1, ))
assert_size_stride(primals_90, (1024, ), (1, ))
assert_size_stride(primals_91, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_92, (1024, ), (1, ))
assert_size_stride(primals_93, (1024, ), (1, ))
assert_size_stride(primals_94, (1024, ), (1, ))
assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_96, (4096, ), (1, ))
assert_size_stride(primals_97, (4096, ), (1, ))
assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_99, (1024, ), (1, ))
assert_size_stride(primals_100, (1024, ), (1, ))
assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_102, (1024, ), (1, ))
assert_size_stride(primals_103, (1024, ), (1, ))
assert_size_stride(primals_104, (1024, ), (1, ))
assert_size_stride(primals_105, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_106, (4096, ), (1, ))
assert_size_stride(primals_107, (4096, ), (1, ))
assert_size_stride(primals_108, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_109, (1024, ), (1, ))
assert_size_stride(primals_110, (1024, ), (1, ))
assert_size_stride(primals_111, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_112, (1024, ), (1, ))
assert_size_stride(primals_113, (1024, ), (1, ))
assert_size_stride(primals_114, (1024, ), (1, ))
assert_size_stride(primals_115, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_116, (4096, ), (1, ))
assert_size_stride(primals_117, (4096, ), (1, ))
assert_size_stride(primals_118, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_119, (1024, ), (1, ))
assert_size_stride(primals_120, (1024, ), (1, ))
assert_size_stride(primals_121, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_122, (1024, ), (1, ))
assert_size_stride(primals_123, (1024, ), (1, ))
assert_size_stride(primals_124, (1024, ), (1, ))
assert_size_stride(primals_125, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_126, (4096, ), (1, ))
assert_size_stride(primals_127, (4096, ), (1, ))
assert_size_stride(primals_128, (8192, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_129, (2048, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_130, (2048, ), (1, ))
assert_size_stride(primals_131, (2048, ), (1, ))
assert_size_stride(primals_132, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_133, (2048, ), (1, ))
assert_size_stride(primals_134, (2048, ), (1, ))
assert_size_stride(primals_135, (2048, ), (1, ))
assert_size_stride(primals_136, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_137, (8192, ), (1, ))
assert_size_stride(primals_138, (8192, ), (1, ))
assert_size_stride(primals_139, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_140, (2048, ), (1, ))
assert_size_stride(primals_141, (2048, ), (1, ))
assert_size_stride(primals_142, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_143, (2048, ), (1, ))
assert_size_stride(primals_144, (2048, ), (1, ))
assert_size_stride(primals_145, (2048, ), (1, ))
assert_size_stride(primals_146, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_147, (8192, ), (1, ))
assert_size_stride(primals_148, (8192, ), (1, ))
assert_size_stride(primals_149, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_150, (2048, ), (1, ))
assert_size_stride(primals_151, (2048, ), (1, ))
assert_size_stride(primals_152, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_153, (2048, ), (1, ))
assert_size_stride(primals_154, (2048, ), (1, ))
assert_size_stride(primals_155, (2048, ), (1, ))
assert_size_stride(primals_156, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_157, (8192, ), (1, ))
assert_size_stride(primals_158, (8192, ), (1, ))
assert_size_stride(primals_159, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_160, (2048, ), (1, ))
assert_size_stride(primals_161, (2048, ), (1, ))
assert_size_stride(primals_162, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_163, (2048, ), (1, ))
assert_size_stride(primals_164, (2048, ), (1, ))
assert_size_stride(primals_165, (2048, ), (1, ))
assert_size_stride(primals_166, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_167, (8192, ), (1, ))
assert_size_stride(primals_168, (8192, ), (1, ))
assert_size_stride(primals_169, (21843, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_170, (21843, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 768, 49, grid=grid(768, 49), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_9, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_9
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_19, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_19
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_29, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_29
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_39, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_39
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_50, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_50
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_60, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_60
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_70, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_70
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_80, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_80
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_91, buf10, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_91
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_101, buf11, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_101
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_111, buf12, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_111
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_121, buf13, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_121
buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_132, buf14, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_132
buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_142, buf15, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_142
buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_152, buf16, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_152
buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144, 2048), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_162, buf17, 4194304, 9, grid=grid(4194304, 9), stream=stream0)
del primals_162
buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf19 # reuse
buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_6.run(buf21, buf0, buf22, 256, 147, grid=grid(256), stream=stream0)
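# buf22 now holds the standardized 7x7 stem filters; the raw primals_1 weight
# is never convolved directly. The same standardize-then-convolve pairing
# (Weight Standardization) repeats before every convolution below.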
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256), torch.float32)
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_7.run(buf23, buf24, 1183744, grid=grid(1183744), stream=stream0)
buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.int8)
# Topologically Sorted Source Nodes: [input_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf24, buf25, buf26, 262144, grid=grid(262144), stream=stream0)
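# Stem downsampling: the 32x32 stride-2 conv output is zero-padded to 34x34
# and max-pooled down to 16x16; buf26 keeps the int8 argmax indices that the
# backward pass of max_pool2d_with_indices consumes.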
buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf25, buf27, buf28, buf30, 128, 2048, grid=grid(128), stream=stream0)
buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf25, buf27, buf28, primals_3, primals_4, buf31, 262144, grid=grid(262144), stream=stream0)
del primals_4
buf33 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf35 = reinterpret_tensor(buf33, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf33 # reuse
buf36 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf35, primals_5, buf36, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf37 = extern_kernels.convolution(buf31, buf36, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf39 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf41 = reinterpret_tensor(buf39, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf39 # reuse
buf42 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_12.run(buf41, primals_6, buf42, 256, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf31, buf42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf44 = buf28; del buf28 # reuse
buf45 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf47 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_1], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf43, buf44, buf45, buf47, 128, 2048, grid=grid(128), stream=stream0)
buf48 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_1, relu_1], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf43, buf44, buf45, primals_7, primals_8, buf48, 262144, grid=grid(262144), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf49 = extern_kernels.convolution(buf48, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf50, primals_10, 262144, grid=grid(262144), stream=stream0)
del primals_10
buf51 = buf45; del buf45 # reuse
buf52 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf54 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf50, buf51, buf52, buf54, 128, 2048, grid=grid(128), stream=stream0)
buf55 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_2, relu_2], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf50, buf51, buf52, primals_11, primals_12, buf55, 262144, grid=grid(262144), stream=stream0)
del primals_12
buf57 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf59 = reinterpret_tensor(buf57, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf57 # reuse
buf60 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf59, primals_13, buf60, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf61 = extern_kernels.convolution(buf55, buf60, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf62 = buf37; del buf37 # reuse
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.add]
triton_poi_fused_add_14.run(buf62, buf61, 1048576, grid=grid(1048576), stream=stream0)
buf63 = buf52; del buf52 # reuse
buf64 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf62, buf63, buf64, buf66, 128, 8192, grid=grid(128), stream=stream0)
buf67 = buf61; del buf61 # reuse
# Topologically Sorted Source Nodes: [group_norm_3, out_4], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_16.run(buf62, buf63, buf64, primals_14, primals_15, buf67, 1048576, grid=grid(1048576), stream=stream0)
del primals_15
buf69 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf71 = reinterpret_tensor(buf69, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf69 # reuse
buf72 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_4, sub_4, add_5, sqrt_4, w_4], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf71, primals_16, buf72, 256, 1024, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf73 = extern_kernels.convolution(buf67, buf72, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf73, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf74 = buf64; del buf64 # reuse
buf75 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf77 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_4], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf73, buf74, buf75, buf77, 128, 2048, grid=grid(128), stream=stream0)
buf78 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_4, relu_4], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf73, buf74, buf75, primals_17, primals_18, buf78, 262144, grid=grid(262144), stream=stream0)
del primals_18
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
buf79 = extern_kernels.convolution(buf78, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf80 = buf79; del buf79 # reuse
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf80, primals_20, 262144, grid=grid(262144), stream=stream0)
del primals_20
buf81 = buf75; del buf75 # reuse
buf82 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf84 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_5], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf80, buf81, buf82, buf84, 128, 2048, grid=grid(128), stream=stream0)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_5, relu_5], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf80, buf81, buf82, primals_21, primals_22, buf85, 262144, grid=grid(262144), stream=stream0)
del primals_22
buf87 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf89 = reinterpret_tensor(buf87, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf87 # reuse
buf90 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf89, primals_23, buf90, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf91 = extern_kernels.convolution(buf85, buf90, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf92 = buf91; del buf91 # reuse
# Topologically Sorted Source Nodes: [input_5], Original ATen: [aten.add]
triton_poi_fused_add_18.run(buf92, buf62, 1048576, grid=grid(1048576), stream=stream0)
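# Note: from this block on, the shortcut add appears to run the other way
# around: the fresh conv output (reused as buf92) is mutated in place and the
# previous block's output (buf62) is added into it. Inductor emits a separate
# kernel variant (triton_poi_fused_add_18) for this operand arrangement.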
buf93 = buf82; del buf82 # reuse
buf94 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf96 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_6], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf92, buf93, buf94, buf96, 128, 8192, grid=grid(128), stream=stream0)
buf97 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1, 16384, 1024), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [group_norm_6, out_8], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_16.run(buf92, buf93, buf94, primals_24, primals_25, buf97, 1048576, grid=grid(1048576), stream=stream0)
del primals_25
buf99 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf101 = reinterpret_tensor(buf99, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf99 # reuse
buf102 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_6, sub_6, add_8, sqrt_6, w_6], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf101, primals_26, buf102, 256, 1024, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.convolution]
buf103 = extern_kernels.convolution(buf97, buf102, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf104 = buf94; del buf94 # reuse
buf105 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf107 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_7], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf103, buf104, buf105, buf107, 128, 2048, grid=grid(128), stream=stream0)
buf108 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_7, relu_7], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf103, buf104, buf105, primals_27, primals_28, buf108, 262144, grid=grid(262144), stream=stream0)
del primals_28
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf108, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf110 = buf109; del buf109 # reuse
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf110, primals_30, 262144, grid=grid(262144), stream=stream0)
del primals_30
buf111 = buf105; del buf105 # reuse
buf112 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf114 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_8], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf110, buf111, buf112, buf114, 128, 2048, grid=grid(128), stream=stream0)
buf115 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_8, relu_8], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf110, buf111, buf112, primals_31, primals_32, buf115, 262144, grid=grid(262144), stream=stream0)
del primals_32
buf117 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf119 = reinterpret_tensor(buf117, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf117 # reuse
buf120 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_7, sub_7, add_9, sqrt_7, w_7], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf119, primals_33, buf120, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution]
buf121 = extern_kernels.convolution(buf115, buf120, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf122 = buf121; del buf121 # reuse
# Topologically Sorted Source Nodes: [input_6], Original ATen: [aten.add]
triton_poi_fused_add_18.run(buf122, buf92, 1048576, grid=grid(1048576), stream=stream0)
buf123 = buf112; del buf112 # reuse
buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf126 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_9], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf122, buf123, buf124, buf126, 128, 8192, grid=grid(128), stream=stream0)
buf127 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_9, out_12], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_16.run(buf122, buf123, buf124, primals_34, primals_35, buf127, 1048576, grid=grid(1048576), stream=stream0)
del primals_35
buf129 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf131 = reinterpret_tensor(buf129, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf129 # reuse
buf132 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_8, sub_8, add_11, sqrt_8, w_8], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf131, primals_36, buf132, 256, 1024, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.convolution]
buf133 = extern_kernels.convolution(buf127, buf132, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf134 = buf124; del buf124 # reuse
buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf137 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_10], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf133, buf134, buf135, buf137, 128, 2048, grid=grid(128), stream=stream0)
buf138 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_10, relu_10], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf133, buf134, buf135, primals_37, primals_38, buf138, 262144, grid=grid(262144), stream=stream0)
del primals_38
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution]
buf139 = extern_kernels.convolution(buf138, buf5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf139, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf140 = buf139; del buf139 # reuse
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution]
triton_poi_fused_convolution_13.run(buf140, primals_40, 262144, grid=grid(262144), stream=stream0)
del primals_40
buf141 = buf135; del buf135 # reuse
buf142 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_11], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_9.run(buf140, buf141, buf142, buf144, 128, 2048, grid=grid(128), stream=stream0)
buf145 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_11, relu_11], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_10.run(buf140, buf141, buf142, primals_41, primals_42, buf145, 262144, grid=grid(262144), stream=stream0)
del primals_42
buf147 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf149 = reinterpret_tensor(buf147, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf147 # reuse
buf150 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_9, sub_9, add_12, sqrt_9, w_9], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf149, primals_43, buf150, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.convolution]
buf151 = extern_kernels.convolution(buf145, buf150, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf152 = buf151; del buf151 # reuse
# Topologically Sorted Source Nodes: [input_7], Original ATen: [aten.add]
triton_poi_fused_add_18.run(buf152, buf122, 1048576, grid=grid(1048576), stream=stream0)
buf153 = buf142; del buf142 # reuse
buf154 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf156 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_12], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf152, buf153, buf154, buf156, 128, 8192, grid=grid(128), stream=stream0)
buf157 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_12, out_16], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_16.run(buf152, buf153, buf154, primals_44, primals_45, buf157, 1048576, grid=grid(1048576), stream=stream0)
del primals_45
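# Note: the block below is a stage transition: channel width doubles
# (1024 -> 2048 through a 512-wide bottleneck) and spatial size halves
# (16x16 -> 8x8), with the downsampling done by stride-2 convolutions rather
# than by pooling.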
buf159 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf161 = reinterpret_tensor(buf159, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf159 # reuse
buf162 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_10, sub_10, add_14, sqrt_10, w_10], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_19.run(buf161, primals_46, buf162, 2048, 1024, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.convolution]
buf163 = extern_kernels.convolution(buf157, buf162, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
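# Note: both the stride-2 projection shortcut above (residual_1) and the main
# path below consume buf157, the group-norm + ReLU output, which matches a
# pre-activation (ResNet v2 style) ordering where normalization precedes the
# split into shortcut and residual branches.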
buf165 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf167 = reinterpret_tensor(buf165, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf165 # reuse
buf168 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_11, sub_11, add_15, sqrt_11, w_11], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_20.run(buf167, primals_47, buf168, 512, 1024, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.convolution]
buf169 = extern_kernels.convolution(buf157, buf168, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 512, 16, 16), (131072, 1, 8192, 512))
buf170 = buf154; del buf154 # reuse
buf171 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf173 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_13], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_21.run(buf169, buf170, buf171, buf173, 128, 4096, grid=grid(128), stream=stream0)
buf174 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_13, relu_13], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_22.run(buf169, buf170, buf171, primals_48, primals_49, buf174, 524288, grid=grid(524288), stream=stream0)
del primals_49
# Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution]
buf175 = extern_kernels.convolution(buf174, buf6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf176 = buf175; del buf175 # reuse
# Topologically Sorted Source Nodes: [out_18], Original ATen: [aten.convolution]
triton_poi_fused_convolution_23.run(buf176, primals_51, 131072, grid=grid(131072), stream=stream0)
del primals_51
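# Note: the 3x3 conv above (out_18) is where the main path's spatial
# downsampling happens (stride=(2, 2), 16x16 -> 8x8); the group-norm
# reductions that follow shrink accordingly (rnumel drops from 4096 to 1024).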
buf177 = buf171; del buf171 # reuse
buf178 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf180 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_14], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf176, buf177, buf178, buf180, 128, 1024, grid=grid(128), stream=stream0)
buf181 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_14, relu_14], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf176, buf177, buf178, primals_52, primals_53, buf181, 131072, grid=grid(131072), stream=stream0)
del primals_53
buf183 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf185 = reinterpret_tensor(buf183, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf183 # reuse
buf186 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_12, sub_12, add_16, sqrt_12, w_12], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf185, primals_54, buf186, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_19], Original ATen: [aten.convolution]
buf187 = extern_kernels.convolution(buf181, buf186, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf187, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf188 = buf163; del buf163 # reuse
# Topologically Sorted Source Nodes: [input_8], Original ATen: [aten.add]
triton_poi_fused_add_27.run(buf188, buf187, 524288, grid=grid(524288), stream=stream0)
buf189 = buf178; del buf178 # reuse
buf190 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf192 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf188, buf189, buf190, buf192, 128, 4096, grid=grid(128), stream=stream0)
buf193 = buf187; del buf187 # reuse
# Topologically Sorted Source Nodes: [group_norm_15, out_20], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf188, buf189, buf190, primals_55, primals_56, buf193, 524288, grid=grid(524288), stream=stream0)
del primals_56
buf195 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf197 = reinterpret_tensor(buf195, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf195 # reuse
buf198 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_13, sub_13, add_18, sqrt_13, w_13], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf197, primals_57, buf198, 512, 2048, grid=grid(512), stream=stream0)
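# Note: this weight standardization uses a looped-reduction kernel
# (triton_red_...) because each of the 512 filters reduces over 2048 values;
# the smaller filters in this section use persistent single-pass kernels
# (triton_per_...). The split appears to follow Inductor's usual heuristic of
# switching to a loop once the reduction no longer fits in a single block.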
# Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution]
buf199 = extern_kernels.convolution(buf193, buf198, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf199, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf200 = buf190; del buf190 # reuse
buf201 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf203 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf199, buf200, buf201, buf203, 128, 1024, grid=grid(128), stream=stream0)
buf204 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_16, relu_16], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf199, buf200, buf201, primals_58, primals_59, buf204, 131072, grid=grid(131072), stream=stream0)
del primals_59
# Topologically Sorted Source Nodes: [out_22], Original ATen: [aten.convolution]
buf205 = extern_kernels.convolution(buf204, buf7, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf205, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf206 = buf205; del buf205 # reuse
# Topologically Sorted Source Nodes: [out_22], Original ATen: [aten.convolution]
triton_poi_fused_convolution_23.run(buf206, primals_61, 131072, grid=grid(131072), stream=stream0)
del primals_61
buf207 = buf201; del buf201 # reuse
buf208 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_17], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf206, buf207, buf208, buf210, 128, 1024, grid=grid(128), stream=stream0)
buf211 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_17, relu_17], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf206, buf207, buf208, primals_62, primals_63, buf211, 131072, grid=grid(131072), stream=stream0)
del primals_63
buf213 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf215 = reinterpret_tensor(buf213, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf213 # reuse
buf216 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_19, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf215, primals_64, buf216, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_23], Original ATen: [aten.convolution]
buf217 = extern_kernels.convolution(buf211, buf216, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf217, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf218 = buf217; del buf217 # reuse
# Topologically Sorted Source Nodes: [input_9], Original ATen: [aten.add]
triton_poi_fused_add_31.run(buf218, buf188, 524288, grid=grid(524288), stream=stream0)
buf219 = buf208; del buf208 # reuse
buf220 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf222 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_18], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf218, buf219, buf220, buf222, 128, 4096, grid=grid(128), stream=stream0)
buf223 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_18, out_24], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf218, buf219, buf220, primals_65, primals_66, buf223, 524288, grid=grid(524288), stream=stream0)
del primals_66
buf225 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf227 = reinterpret_tensor(buf225, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf225 # reuse
buf228 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_21, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf227, primals_67, buf228, 512, 2048, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [out_25], Original ATen: [aten.convolution]
buf229 = extern_kernels.convolution(buf223, buf228, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf229, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf230 = buf220; del buf220 # reuse
buf231 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf233 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_19], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf229, buf230, buf231, buf233, 128, 1024, grid=grid(128), stream=stream0)
buf234 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_19, relu_19], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf229, buf230, buf231, primals_68, primals_69, buf234, 131072, grid=grid(131072), stream=stream0)
del primals_69
# Topologically Sorted Source Nodes: [out_26], Original ATen: [aten.convolution]
buf235 = extern_kernels.convolution(buf234, buf8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf235, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf236 = buf235; del buf235 # reuse
# Topologically Sorted Source Nodes: [out_26], Original ATen: [aten.convolution]
triton_poi_fused_convolution_23.run(buf236, primals_71, 131072, grid=grid(131072), stream=stream0)
del primals_71
buf237 = buf231; del buf231 # reuse
buf238 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf240 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_20], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf236, buf237, buf238, buf240, 128, 1024, grid=grid(128), stream=stream0)
buf241 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_20, relu_20], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf236, buf237, buf238, primals_72, primals_73, buf241, 131072, grid=grid(131072), stream=stream0)
del primals_73
buf243 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf245 = reinterpret_tensor(buf243, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf243 # reuse
buf246 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_22, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf245, primals_74, buf246, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.convolution]
buf247 = extern_kernels.convolution(buf241, buf246, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf247, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf248 = buf247; del buf247 # reuse
# Topologically Sorted Source Nodes: [input_10], Original ATen: [aten.add]
triton_poi_fused_add_31.run(buf248, buf218, 524288, grid=grid(524288), stream=stream0)
buf249 = buf238; del buf238 # reuse
buf250 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf252 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_21], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf248, buf249, buf250, buf252, 128, 4096, grid=grid(128), stream=stream0)
buf253 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_21, out_28], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf248, buf249, buf250, primals_75, primals_76, buf253, 524288, grid=grid(524288), stream=stream0)
del primals_76
buf255 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf257 = reinterpret_tensor(buf255, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf255 # reuse
buf258 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_24, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf257, primals_77, buf258, 512, 2048, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [out_29], Original ATen: [aten.convolution]
buf259 = extern_kernels.convolution(buf253, buf258, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf259, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf260 = buf250; del buf250 # reuse
buf261 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf263 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_22], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf259, buf260, buf261, buf263, 128, 1024, grid=grid(128), stream=stream0)
buf264 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_22, relu_22], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf259, buf260, buf261, primals_78, primals_79, buf264, 131072, grid=grid(131072), stream=stream0)
del primals_79
# Topologically Sorted Source Nodes: [out_30], Original ATen: [aten.convolution]
buf265 = extern_kernels.convolution(buf264, buf9, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf265, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf266 = buf265; del buf265 # reuse
# Topologically Sorted Source Nodes: [out_30], Original ATen: [aten.convolution]
triton_poi_fused_convolution_23.run(buf266, primals_81, 131072, grid=grid(131072), stream=stream0)
del primals_81
buf267 = buf261; del buf261 # reuse
buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf270 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_23], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_24.run(buf266, buf267, buf268, buf270, 128, 1024, grid=grid(128), stream=stream0)
buf271 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_23, relu_23], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_25.run(buf266, buf267, buf268, primals_82, primals_83, buf271, 131072, grid=grid(131072), stream=stream0)
del primals_83
buf273 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf275 = reinterpret_tensor(buf273, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf273 # reuse
buf276 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_25, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_26.run(buf275, primals_84, buf276, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_31], Original ATen: [aten.convolution]
buf277 = extern_kernels.convolution(buf271, buf276, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf277, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf278 = buf277; del buf277 # reuse
# Topologically Sorted Source Nodes: [input_11], Original ATen: [aten.add]
triton_poi_fused_add_31.run(buf278, buf248, 524288, grid=grid(524288), stream=stream0)
buf279 = buf268; del buf268 # reuse
buf280 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf282 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_24], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf278, buf279, buf280, buf282, 128, 4096, grid=grid(128), stream=stream0)
buf283 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_24, out_32], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_29.run(buf278, buf279, buf280, primals_85, primals_86, buf283, 524288, grid=grid(524288), stream=stream0)
del primals_86
buf285 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf287 = reinterpret_tensor(buf285, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf285 # reuse
buf288 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_19, sub_19, add_27, sqrt_19, w_19], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_32.run(buf287, primals_87, buf288, 4096, 2048, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
buf289 = extern_kernels.convolution(buf283, buf288, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf289, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
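# Note: the stride-2 projection above (residual_2) begins the next stage
# transition: 2048 -> 4096 channels through a 1024-wide bottleneck,
# 8x8 -> 4x4 spatial. The 4x expansion ratio and the bottleneck widths
# (256/512/1024/...) are consistent with a width-multiplied BiT-style
# ResNet, though the exact model is defined by the module code this file
# was compiled from.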
buf291 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf293 = reinterpret_tensor(buf291, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf291 # reuse
buf294 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_20, sub_20, add_28, sqrt_20, w_20], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_33.run(buf293, primals_88, buf294, 1024, 2048, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_33], Original ATen: [aten.convolution]
buf295 = extern_kernels.convolution(buf283, buf294, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf295, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf296 = buf280; del buf280 # reuse
buf297 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf299 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_34.run(buf295, buf296, buf297, buf299, 128, 2048, grid=grid(128), stream=stream0)
buf300 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_25, relu_25], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_35.run(buf295, buf296, buf297, primals_89, primals_90, buf300, 262144, grid=grid(262144), stream=stream0)
del primals_90
# Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution]
buf301 = extern_kernels.convolution(buf300, buf10, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf301, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf302 = buf301; del buf301 # reuse
# Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.convolution]
triton_poi_fused_convolution_36.run(buf302, primals_92, 65536, grid=grid(65536), stream=stream0)
del primals_92
buf303 = buf297; del buf297 # reuse
buf304 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf306 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_26], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf302, buf303, buf304, buf306, 128, 512, grid=grid(128), stream=stream0)
buf307 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_26, relu_26], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf302, buf303, buf304, primals_93, primals_94, buf307, 65536, grid=grid(65536), stream=stream0)
del primals_94
buf309 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf311 = reinterpret_tensor(buf309, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf309 # reuse
buf312 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_21, sub_21, add_29, sqrt_21, w_21], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf311, primals_95, buf312, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_35], Original ATen: [aten.convolution]
buf313 = extern_kernels.convolution(buf307, buf312, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf313, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf314 = buf289; del buf289 # reuse
# Topologically Sorted Source Nodes: [input_12], Original ATen: [aten.add]
triton_poi_fused_add_40.run(buf314, buf313, 262144, grid=grid(262144), stream=stream0)
buf315 = buf304; del buf304 # reuse
buf316 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf318 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_27], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf314, buf315, buf316, buf318, 128, 2048, grid=grid(128), stream=stream0)
buf319 = buf313; del buf313 # reuse
# Topologically Sorted Source Nodes: [group_norm_27, out_36], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_42.run(buf314, buf315, buf316, primals_96, primals_97, buf319, 262144, grid=grid(262144), stream=stream0)
del primals_97
buf321 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf323 = reinterpret_tensor(buf321, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf321 # reuse
buf324 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_22, sub_22, add_31, sqrt_22, w_22], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf323, primals_98, buf324, 1024, 4096, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_37], Original ATen: [aten.convolution]
buf325 = extern_kernels.convolution(buf319, buf324, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf325, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf326 = buf316; del buf316 # reuse
buf327 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf325, buf326, buf327, buf329, 128, 512, grid=grid(128), stream=stream0)
buf330 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_28, relu_28], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf325, buf326, buf327, primals_99, primals_100, buf330, 65536, grid=grid(65536), stream=stream0)
del primals_100
# Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution]
buf331 = extern_kernels.convolution(buf330, buf11, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf331, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf332 = buf331; del buf331 # reuse
# Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution]
triton_poi_fused_convolution_36.run(buf332, primals_102, 65536, grid=grid(65536), stream=stream0)
del primals_102
buf333 = buf327; del buf327 # reuse
buf334 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf336 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf332, buf333, buf334, buf336, 128, 512, grid=grid(128), stream=stream0)
buf337 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_29, relu_29], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf332, buf333, buf334, primals_103, primals_104, buf337, 65536, grid=grid(65536), stream=stream0)
del primals_104
buf339 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf341 = reinterpret_tensor(buf339, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf339 # reuse
buf342 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_23, sub_23, add_32, sqrt_23, w_23], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf341, primals_105, buf342, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_39], Original ATen: [aten.convolution]
buf343 = extern_kernels.convolution(buf337, buf342, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf343, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf344 = buf343; del buf343 # reuse
# Topologically Sorted Source Nodes: [input_13], Original ATen: [aten.add]
triton_poi_fused_add_44.run(buf344, buf314, 262144, grid=grid(262144), stream=stream0)
buf345 = buf334; del buf334 # reuse
buf346 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf348 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_30], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf344, buf345, buf346, buf348, 128, 2048, grid=grid(128), stream=stream0)
buf349 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_30, out_40], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_42.run(buf344, buf345, buf346, primals_106, primals_107, buf349, 262144, grid=grid(262144), stream=stream0)
del primals_107
buf351 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf353 = reinterpret_tensor(buf351, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf351 # reuse
buf354 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_24, sub_24, add_34, sqrt_24, w_24], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf353, primals_108, buf354, 1024, 4096, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_41], Original ATen: [aten.convolution]
buf355 = extern_kernels.convolution(buf349, buf354, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf355, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf356 = buf346; del buf346 # reuse
buf357 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf359 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_31], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf355, buf356, buf357, buf359, 128, 512, grid=grid(128), stream=stream0)
buf360 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_31, relu_31], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf355, buf356, buf357, primals_109, primals_110, buf360, 65536, grid=grid(65536), stream=stream0)
del primals_110
# Topologically Sorted Source Nodes: [out_42], Original ATen: [aten.convolution]
buf361 = extern_kernels.convolution(buf360, buf12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf361, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf362 = buf361; del buf361 # reuse
# Topologically Sorted Source Nodes: [out_42], Original ATen: [aten.convolution]
triton_poi_fused_convolution_36.run(buf362, primals_112, 65536, grid=grid(65536), stream=stream0)
del primals_112
buf363 = buf357; del buf357 # reuse
buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_32], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf362, buf363, buf364, buf366, 128, 512, grid=grid(128), stream=stream0)
buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_32, relu_32], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf362, buf363, buf364, primals_113, primals_114, buf367, 65536, grid=grid(65536), stream=stream0)
del primals_114
buf369 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf371 = reinterpret_tensor(buf369, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf369 # reuse
buf372 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_25, sub_25, add_35, sqrt_25, w_25], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf371, primals_115, buf372, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_43], Original ATen: [aten.convolution]
buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf373, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf374 = buf373; del buf373 # reuse
# Topologically Sorted Source Nodes: [input_14], Original ATen: [aten.add]
triton_poi_fused_add_44.run(buf374, buf344, 262144, grid=grid(262144), stream=stream0)
buf375 = buf364; del buf364 # reuse
buf376 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf378 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_33], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf374, buf375, buf376, buf378, 128, 2048, grid=grid(128), stream=stream0)
buf379 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_33, out_44], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_42.run(buf374, buf375, buf376, primals_116, primals_117, buf379, 262144, grid=grid(262144), stream=stream0)
del primals_117
buf381 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf383 = reinterpret_tensor(buf381, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf381 # reuse
buf384 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_26, sub_26, add_37, sqrt_26, w_26], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf383, primals_118, buf384, 1024, 4096, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_45], Original ATen: [aten.convolution]
buf385 = extern_kernels.convolution(buf379, buf384, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf385, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf386 = buf376; del buf376 # reuse
buf387 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf389 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_34], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf385, buf386, buf387, buf389, 128, 512, grid=grid(128), stream=stream0)
buf390 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_34, relu_34], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf385, buf386, buf387, primals_119, primals_120, buf390, 65536, grid=grid(65536), stream=stream0)
del primals_120
# Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
buf391 = extern_kernels.convolution(buf390, buf13, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf391, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf392 = buf391; del buf391 # reuse
# Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
triton_poi_fused_convolution_36.run(buf392, primals_122, 65536, grid=grid(65536), stream=stream0)
del primals_122
buf393 = buf387; del buf387 # reuse
buf394 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf396 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_35], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_37.run(buf392, buf393, buf394, buf396, 128, 512, grid=grid(128), stream=stream0)
buf397 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_35, relu_35], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_38.run(buf392, buf393, buf394, primals_123, primals_124, buf397, 65536, grid=grid(65536), stream=stream0)
del primals_124
buf399 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf401 = reinterpret_tensor(buf399, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf399 # reuse
buf402 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_38, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_39.run(buf401, primals_125, buf402, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_47], Original ATen: [aten.convolution]
buf403 = extern_kernels.convolution(buf397, buf402, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf403, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf404 = buf403; del buf403 # reuse
# Topologically Sorted Source Nodes: [input_15], Original ATen: [aten.add]
triton_poi_fused_add_44.run(buf404, buf374, 262144, grid=grid(262144), stream=stream0)
buf405 = buf394; del buf394 # reuse
buf406 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_36], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf404, buf405, buf406, buf408, 128, 2048, grid=grid(128), stream=stream0)
buf409 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_36, out_48], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_42.run(buf404, buf405, buf406, primals_126, primals_127, buf409, 262144, grid=grid(262144), stream=stream0)
del primals_127
buf411 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
buf413 = reinterpret_tensor(buf411, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf411 # reuse
buf414 = empty_strided_cuda((8192, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_40, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_45.run(buf413, primals_128, buf414, 8192, 4096, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.convolution]
buf415 = extern_kernels.convolution(buf409, buf414, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf415, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
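# Note: final stage transition in this section: 4096 -> 8192 channels
# through a 2048-wide bottleneck, 4x4 -> 2x2 spatial.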
buf417 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf419 = reinterpret_tensor(buf417, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf417 # reuse
buf420 = empty_strided_cuda((2048, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_41, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_46.run(buf419, primals_129, buf420, 2048, 4096, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_49], Original ATen: [aten.convolution]
buf421 = extern_kernels.convolution(buf409, buf420, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf421, (4, 2048, 4, 4), (32768, 1, 8192, 2048))
buf422 = buf406; del buf406 # reuse
buf423 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf425 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_47.run(buf421, buf422, buf423, buf425, 128, 1024, grid=grid(128), stream=stream0)
buf426 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_37, relu_37], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_48.run(buf421, buf422, buf423, primals_130, primals_131, buf426, 131072, grid=grid(131072), stream=stream0)
del primals_131
# Topologically Sorted Source Nodes: [out_50], Original ATen: [aten.convolution]
buf427 = extern_kernels.convolution(buf426, buf14, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf427, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf428 = buf427; del buf427 # reuse
# Topologically Sorted Source Nodes: [out_50], Original ATen: [aten.convolution]
triton_poi_fused_convolution_49.run(buf428, primals_133, 32768, grid=grid(32768), stream=stream0)
del primals_133
buf429 = buf423; del buf423 # reuse
buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf428, buf429, buf430, buf432, 128, 256, grid=grid(128), stream=stream0)
buf433 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_38, relu_38], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf428, buf429, buf430, primals_134, primals_135, buf433, 32768, grid=grid(32768), stream=stream0)
del primals_135
buf435 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
buf437 = reinterpret_tensor(buf435, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf435 # reuse
buf438 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_42, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf437, primals_136, buf438, 8192, 2048, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [out_51], Original ATen: [aten.convolution]
buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf439, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf440 = buf415; del buf415 # reuse
# Topologically Sorted Source Nodes: [input_16], Original ATen: [aten.add]
triton_poi_fused_add_53.run(buf440, buf439, 131072, grid=grid(131072), stream=stream0)
buf441 = buf430; del buf430 # reuse
buf442 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf444 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_39], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_54.run(buf440, buf441, buf442, buf444, 128, 1024, grid=grid(128), stream=stream0)
buf445 = buf439; del buf439 # reuse
# Topologically Sorted Source Nodes: [group_norm_39, out_52], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_55.run(buf440, buf441, buf442, primals_137, primals_138, buf445, 131072, grid=grid(131072), stream=stream0)
del primals_138
buf447 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf449 = reinterpret_tensor(buf447, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf447 # reuse
buf450 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_44, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_56.run(buf449, primals_139, buf450, 2048, 8192, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_53], Original ATen: [aten.convolution]
buf451 = extern_kernels.convolution(buf445, buf450, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf451, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf452 = buf442; del buf442 # reuse
buf453 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf455 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_40], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf451, buf452, buf453, buf455, 128, 256, grid=grid(128), stream=stream0)
buf456 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_40, relu_40], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf451, buf452, buf453, primals_140, primals_141, buf456, 32768, grid=grid(32768), stream=stream0)
del primals_141
# Topologically Sorted Source Nodes: [out_54], Original ATen: [aten.convolution]
buf457 = extern_kernels.convolution(buf456, buf15, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf457, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf458 = buf457; del buf457 # reuse
# Topologically Sorted Source Nodes: [out_54], Original ATen: [aten.convolution]
triton_poi_fused_convolution_49.run(buf458, primals_143, 32768, grid=grid(32768), stream=stream0)
del primals_143
buf459 = buf453; del buf453 # reuse
buf460 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf462 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_41], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf458, buf459, buf460, buf462, 128, 256, grid=grid(128), stream=stream0)
buf463 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_41, relu_41], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf458, buf459, buf460, primals_144, primals_145, buf463, 32768, grid=grid(32768), stream=stream0)
del primals_145
buf465 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
buf467 = reinterpret_tensor(buf465, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf465 # reuse
buf468 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_32, sub_32, add_45, sqrt_32, w_32], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf467, primals_146, buf468, 8192, 2048, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [out_55], Original ATen: [aten.convolution]
buf469 = extern_kernels.convolution(buf463, buf468, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf469, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf470 = buf469; del buf469 # reuse
# Topologically Sorted Source Nodes: [input_17], Original ATen: [aten.add]
triton_poi_fused_add_57.run(buf470, buf440, 131072, grid=grid(131072), stream=stream0)
buf471 = buf460; del buf460 # reuse
buf472 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf474 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_42], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_54.run(buf470, buf471, buf472, buf474, 128, 1024, grid=grid(128), stream=stream0)
buf475 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_42, out_56], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_55.run(buf470, buf471, buf472, primals_147, primals_148, buf475, 131072, grid=grid(131072), stream=stream0)
del primals_148
buf477 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf479 = reinterpret_tensor(buf477, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf477 # reuse
buf480 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_33, sub_33, add_47, sqrt_33, w_33], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_56.run(buf479, primals_149, buf480, 2048, 8192, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_57], Original ATen: [aten.convolution]
buf481 = extern_kernels.convolution(buf475, buf480, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf481, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf482 = buf472; del buf472 # reuse
buf483 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf485 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_43], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf481, buf482, buf483, buf485, 128, 256, grid=grid(128), stream=stream0)
buf486 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_43, relu_43], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf481, buf482, buf483, primals_150, primals_151, buf486, 32768, grid=grid(32768), stream=stream0)
del primals_151
# Topologically Sorted Source Nodes: [out_58], Original ATen: [aten.convolution]
buf487 = extern_kernels.convolution(buf486, buf16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf487, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf488 = buf487; del buf487 # reuse
# Topologically Sorted Source Nodes: [out_58], Original ATen: [aten.convolution]
triton_poi_fused_convolution_49.run(buf488, primals_153, 32768, grid=grid(32768), stream=stream0)
del primals_153
buf489 = buf483; del buf483 # reuse
buf490 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf492 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_44], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf488, buf489, buf490, buf492, 128, 256, grid=grid(128), stream=stream0)
buf493 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_44, relu_44], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf488, buf489, buf490, primals_154, primals_155, buf493, 32768, grid=grid(32768), stream=stream0)
del primals_155
buf495 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
buf497 = reinterpret_tensor(buf495, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf495 # reuse
buf498 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_34, sub_34, add_48, sqrt_34, w_34], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf497, primals_156, buf498, 8192, 2048, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [out_59], Original ATen: [aten.convolution]
buf499 = extern_kernels.convolution(buf493, buf498, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf499, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf500 = buf499; del buf499 # reuse
# Topologically Sorted Source Nodes: [input_18], Original ATen: [aten.add]
triton_poi_fused_add_57.run(buf500, buf470, 131072, grid=grid(131072), stream=stream0)
buf501 = buf490; del buf490 # reuse
buf502 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf504 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_45], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_54.run(buf500, buf501, buf502, buf504, 128, 1024, grid=grid(128), stream=stream0)
buf505 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_45, out_60], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_55.run(buf500, buf501, buf502, primals_157, primals_158, buf505, 131072, grid=grid(131072), stream=stream0)
del primals_158
buf507 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf509 = reinterpret_tensor(buf507, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf507 # reuse
buf510 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_35, sub_35, add_50, sqrt_35, w_35], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_56.run(buf509, primals_159, buf510, 2048, 8192, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [out_61], Original ATen: [aten.convolution]
buf511 = extern_kernels.convolution(buf505, buf510, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf511, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf512 = buf502; del buf502 # reuse
buf513 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf515 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_46], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf511, buf512, buf513, buf515, 128, 256, grid=grid(128), stream=stream0)
buf516 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_46, relu_46], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf511, buf512, buf513, primals_160, primals_161, buf516, 32768, grid=grid(32768), stream=stream0)
del primals_161
# Topologically Sorted Source Nodes: [out_62], Original ATen: [aten.convolution]
buf517 = extern_kernels.convolution(buf516, buf17, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf517, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf518 = buf517; del buf517 # reuse
# Topologically Sorted Source Nodes: [out_62], Original ATen: [aten.convolution]
triton_poi_fused_convolution_49.run(buf518, primals_163, 32768, grid=grid(32768), stream=stream0)
del primals_163
buf519 = buf513; del buf513 # reuse
buf520 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf522 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_47], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_50.run(buf518, buf519, buf520, buf522, 128, 256, grid=grid(128), stream=stream0)
buf523 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_47, relu_47], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_51.run(buf518, buf519, buf520, primals_164, primals_165, buf523, 32768, grid=grid(32768), stream=stream0)
del primals_165
buf525 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192), torch.float32)
buf527 = reinterpret_tensor(buf525, (8192, 1, 1, 1), (1, 1, 1, 1), 0); del buf525 # reuse
buf528 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_36, sub_36, add_51, sqrt_36, w_36], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_52.run(buf527, primals_166, buf528, 8192, 2048, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [out_63], Original ATen: [aten.convolution]
buf529 = extern_kernels.convolution(buf523, buf528, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf529, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf530 = buf529; del buf529 # reuse
# Topologically Sorted Source Nodes: [input_19], Original ATen: [aten.add]
triton_poi_fused_add_57.run(buf530, buf500, 131072, grid=grid(131072), stream=stream0)
buf531 = reinterpret_tensor(buf520, (4, 32, 1, 1), (32, 1, 32, 32), 0); del buf520 # reuse
buf532 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf534 = reinterpret_tensor(buf532, (4, 32, 1, 1), (32, 1, 32, 32), 0); del buf532 # reuse
# Topologically Sorted Source Nodes: [input_20], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_58.run(buf534, buf530, buf531, 128, 1024, grid=grid(128), stream=stream0)
buf535 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192), torch.float32)
# Topologically Sorted Source Nodes: [input_20, input_21, input_22], Original ATen: [aten.native_group_norm, aten.relu, aten.mean]
triton_poi_fused_mean_native_group_norm_relu_59.run(buf530, buf531, buf534, primals_167, primals_168, buf535, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
buf536 = extern_kernels.convolution(buf535, primals_169, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf536, (4, 21843, 1, 1), (21843, 1, 21843, 21843))
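    # Classification head: after GroupNorm + ReLU + global average pooling, a
    # 1x1 conv maps the 8192 features to 21843 logits (the default head_size;
    # 21843 matches the ImageNet-21k label count).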
buf537 = reinterpret_tensor(buf536, (4, 21843, 1, 1), (21843, 1, 1, 1), 0); del buf536 # reuse
# Topologically Sorted Source Nodes: [input_23], Original ATen: [aten.convolution]
triton_poi_fused_convolution_60.run(buf537, primals_170, 87372, grid=grid(87372), stream=stream0)
del primals_170
return (buf537, buf0, buf1, primals_3, primals_5, primals_6, primals_7, buf2, primals_11, primals_13, primals_14, primals_16, primals_17, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_31, primals_33, primals_34, primals_36, primals_37, buf5, primals_41, primals_43, primals_44, primals_46, primals_47, primals_48, buf6, primals_52, primals_54, primals_55, primals_57, primals_58, buf7, primals_62, primals_64, primals_65, primals_67, primals_68, buf8, primals_72, primals_74, primals_75, primals_77, primals_78, buf9, primals_82, primals_84, primals_85, primals_87, primals_88, primals_89, buf10, primals_93, primals_95, primals_96, primals_98, primals_99, buf11, primals_103, primals_105, primals_106, primals_108, primals_109, buf12, primals_113, primals_115, primals_116, primals_118, primals_119, buf13, primals_123, primals_125, primals_126, primals_128, primals_129, primals_130, buf14, primals_134, primals_136, primals_137, primals_139, primals_140, buf15, primals_144, primals_146, primals_147, primals_149, primals_150, buf16, primals_154, primals_156, primals_157, primals_159, primals_160, buf17, primals_164, primals_166, primals_167, primals_168, primals_169, buf21, buf22, buf24, buf25, buf26, reinterpret_tensor(buf27, (4, 32), (32, 1), 0), reinterpret_tensor(buf30, (4, 32), (32, 1), 0), buf31, buf35, buf36, buf41, buf42, buf43, reinterpret_tensor(buf44, (4, 32), (32, 1), 0), reinterpret_tensor(buf47, (4, 32), (32, 1), 0), buf48, buf50, reinterpret_tensor(buf51, (4, 32), (32, 1), 0), reinterpret_tensor(buf54, (4, 32), (32, 1), 0), buf55, buf59, buf60, buf62, reinterpret_tensor(buf63, (4, 32), (32, 1), 0), reinterpret_tensor(buf66, (4, 32), (32, 1), 0), buf67, buf71, buf72, buf73, reinterpret_tensor(buf74, (4, 32), (32, 1), 0), reinterpret_tensor(buf77, (4, 32), (32, 1), 0), buf78, buf80, reinterpret_tensor(buf81, (4, 32), (32, 1), 0), reinterpret_tensor(buf84, (4, 32), (32, 1), 0), buf85, buf89, buf90, buf92, reinterpret_tensor(buf93, (4, 32), (32, 1), 0), reinterpret_tensor(buf96, (4, 32), (32, 1), 0), buf97, buf101, buf102, buf103, reinterpret_tensor(buf104, (4, 32), (32, 1), 0), reinterpret_tensor(buf107, (4, 32), (32, 1), 0), buf108, buf110, reinterpret_tensor(buf111, (4, 32), (32, 1), 0), reinterpret_tensor(buf114, (4, 32), (32, 1), 0), buf115, buf119, buf120, buf122, reinterpret_tensor(buf123, (4, 32), (32, 1), 0), reinterpret_tensor(buf126, (4, 32), (32, 1), 0), buf127, buf131, buf132, buf133, reinterpret_tensor(buf134, (4, 32), (32, 1), 0), reinterpret_tensor(buf137, (4, 32), (32, 1), 0), buf138, buf140, reinterpret_tensor(buf141, (4, 32), (32, 1), 0), reinterpret_tensor(buf144, (4, 32), (32, 1), 0), buf145, buf149, buf150, buf152, reinterpret_tensor(buf153, (4, 32), (32, 1), 0), reinterpret_tensor(buf156, (4, 32), (32, 1), 0), buf157, buf161, buf162, buf167, buf168, buf169, reinterpret_tensor(buf170, (4, 32), (32, 1), 0), reinterpret_tensor(buf173, (4, 32), (32, 1), 0), buf174, buf176, reinterpret_tensor(buf177, (4, 32), (32, 1), 0), reinterpret_tensor(buf180, (4, 32), (32, 1), 0), buf181, buf185, buf186, buf188, reinterpret_tensor(buf189, (4, 32), (32, 1), 0), reinterpret_tensor(buf192, (4, 32), (32, 1), 0), buf193, buf197, buf198, buf199, reinterpret_tensor(buf200, (4, 32), (32, 1), 0), reinterpret_tensor(buf203, (4, 32), (32, 1), 0), buf204, buf206, reinterpret_tensor(buf207, (4, 32), (32, 1), 0), reinterpret_tensor(buf210, (4, 32), (32, 1), 0), buf211, buf215, buf216, buf218, reinterpret_tensor(buf219, (4, 32), (32, 1), 0), 
reinterpret_tensor(buf222, (4, 32), (32, 1), 0), buf223, buf227, buf228, buf229, reinterpret_tensor(buf230, (4, 32), (32, 1), 0), reinterpret_tensor(buf233, (4, 32), (32, 1), 0), buf234, buf236, reinterpret_tensor(buf237, (4, 32), (32, 1), 0), reinterpret_tensor(buf240, (4, 32), (32, 1), 0), buf241, buf245, buf246, buf248, reinterpret_tensor(buf249, (4, 32), (32, 1), 0), reinterpret_tensor(buf252, (4, 32), (32, 1), 0), buf253, buf257, buf258, buf259, reinterpret_tensor(buf260, (4, 32), (32, 1), 0), reinterpret_tensor(buf263, (4, 32), (32, 1), 0), buf264, buf266, reinterpret_tensor(buf267, (4, 32), (32, 1), 0), reinterpret_tensor(buf270, (4, 32), (32, 1), 0), buf271, buf275, buf276, buf278, reinterpret_tensor(buf279, (4, 32), (32, 1), 0), reinterpret_tensor(buf282, (4, 32), (32, 1), 0), buf283, buf287, buf288, buf293, buf294, buf295, reinterpret_tensor(buf296, (4, 32), (32, 1), 0), reinterpret_tensor(buf299, (4, 32), (32, 1), 0), buf300, buf302, reinterpret_tensor(buf303, (4, 32), (32, 1), 0), reinterpret_tensor(buf306, (4, 32), (32, 1), 0), buf307, buf311, buf312, buf314, reinterpret_tensor(buf315, (4, 32), (32, 1), 0), reinterpret_tensor(buf318, (4, 32), (32, 1), 0), buf319, buf323, buf324, buf325, reinterpret_tensor(buf326, (4, 32), (32, 1), 0), reinterpret_tensor(buf329, (4, 32), (32, 1), 0), buf330, buf332, reinterpret_tensor(buf333, (4, 32), (32, 1), 0), reinterpret_tensor(buf336, (4, 32), (32, 1), 0), buf337, buf341, buf342, buf344, reinterpret_tensor(buf345, (4, 32), (32, 1), 0), reinterpret_tensor(buf348, (4, 32), (32, 1), 0), buf349, buf353, buf354, buf355, reinterpret_tensor(buf356, (4, 32), (32, 1), 0), reinterpret_tensor(buf359, (4, 32), (32, 1), 0), buf360, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf374, reinterpret_tensor(buf375, (4, 32), (32, 1), 0), reinterpret_tensor(buf378, (4, 32), (32, 1), 0), buf379, buf383, buf384, buf385, reinterpret_tensor(buf386, (4, 32), (32, 1), 0), reinterpret_tensor(buf389, (4, 32), (32, 1), 0), buf390, buf392, reinterpret_tensor(buf393, (4, 32), (32, 1), 0), reinterpret_tensor(buf396, (4, 32), (32, 1), 0), buf397, buf401, buf402, buf404, reinterpret_tensor(buf405, (4, 32), (32, 1), 0), reinterpret_tensor(buf408, (4, 32), (32, 1), 0), buf409, buf413, buf414, buf419, buf420, buf421, reinterpret_tensor(buf422, (4, 32), (32, 1), 0), reinterpret_tensor(buf425, (4, 32), (32, 1), 0), buf426, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf440, reinterpret_tensor(buf441, (4, 32), (32, 1), 0), reinterpret_tensor(buf444, (4, 32), (32, 1), 0), buf445, buf449, buf450, buf451, reinterpret_tensor(buf452, (4, 32), (32, 1), 0), reinterpret_tensor(buf455, (4, 32), (32, 1), 0), buf456, buf458, reinterpret_tensor(buf459, (4, 32), (32, 1), 0), reinterpret_tensor(buf462, (4, 32), (32, 1), 0), buf463, buf467, buf468, buf470, reinterpret_tensor(buf471, (4, 32), (32, 1), 0), reinterpret_tensor(buf474, (4, 32), (32, 1), 0), buf475, buf479, buf480, buf481, reinterpret_tensor(buf482, (4, 32), (32, 1), 0), reinterpret_tensor(buf485, (4, 32), (32, 1), 0), buf486, buf488, reinterpret_tensor(buf489, (4, 32), (32, 1), 0), reinterpret_tensor(buf492, (4, 32), (32, 1), 0), buf493, buf497, buf498, buf500, reinterpret_tensor(buf501, (4, 32), (32, 1), 0), reinterpret_tensor(buf504, (4, 32), (32, 1), 0), buf505, buf509, buf510, buf511, reinterpret_tensor(buf512, (4, 32), (32, 1), 0), reinterpret_tensor(buf515, 
(4, 32), (32, 1), 0), buf516, buf518, reinterpret_tensor(buf519, (4, 32), (32, 1), 0), reinterpret_tensor(buf522, (4, 32), (32, 1), 0), buf523, buf527, buf528, buf530, buf531, buf534, buf535, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((2048, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((4096, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((1024, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_122 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_123 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_124 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_125 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_126 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_127 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_128 = rand_strided((8192, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_129 = rand_strided((2048, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_130 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_131 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_132 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_133 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_134 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_135 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_136 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_137 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_138 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_139 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_140 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_141 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_142 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_143 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_144 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_145 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_146 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_147 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_148 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_149 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_150 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_151 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_152 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_153 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_154 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_155 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_156 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_157 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_158 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_159 = rand_strided((2048, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_160 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_161 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_162 = rand_strided((2048, 2048, 3, 3), (18432, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_163 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_164 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_165 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_166 = rand_strided((8192, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_167 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_168 = rand_strided((8192, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_169 = rand_strided((21843, 8192, 1, 1), (8192, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_170 = rand_strided((21843, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121, primals_122, primals_123, primals_124, primals_125, primals_126, primals_127, primals_128, primals_129, primals_130, primals_131, primals_132, primals_133, primals_134, primals_135, primals_136, primals_137, primals_138, primals_139, primals_140, primals_141, primals_142, primals_143, primals_144, primals_145, primals_146, primals_147, primals_148, primals_149, primals_150, primals_151, primals_152, primals_153, primals_154, primals_155, primals_156, primals_157, primals_158, primals_159, primals_160, primals_161, primals_162, primals_163, primals_164, primals_165, primals_166, primals_167, primals_168, primals_169, primals_170])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import torch.utils.data
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
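# A minimal usage sketch of tf2th (hypothetical, not part of the original
# module), assuming numpy is available; TF checkpoints store conv kernels as
# HWIO while PyTorch expects OIHW:
#
#   import numpy as np
#   hwio = np.zeros((7, 7, 3, 64), dtype=np.float32)  # (H, W, In, Out)
#   oihw = tf2th(hwio)                                # torch.Size([64, 3, 7, 7])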
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
        return F.conv2d(x, w, self.bias, self.stride, self.padding,
            self.dilation, self.groups)
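# Note: this weight standardization is what the compiled graph lowers to the
# fused `var_mean -> sub -> add -> sqrt -> div` Triton kernels
# (triton_*_fused_add_div_sqrt_sub_var_mean_*) in the generated code above.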
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
    Except it puts the stride on the 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
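    # Pre-activation ordering: every conv consumes relu(gn(x)), and the
    # downsample projection (when present) reads the pre-activated input
    # rather than x itself, per the v2 residual formulation.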
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
            self.conv1.weight.copy_(tf2th(weights[f'{prefix}a/{convname}/kernel']))
            self.conv2.weight.copy_(tf2th(weights[f'{prefix}b/{convname}/kernel']))
            self.conv3.weight.copy_(tf2th(weights[f'{prefix}c/{convname}/kernel']))
            self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma']))
            self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma']))
            self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma']))
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843,
zero_head=False):
super().__init__()
wf = width_factor
self.wf = wf
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, 64 * wf, kernel_size=7, stride=2,
                padding=3, bias=False)),
            ('pad', nn.ConstantPad2d(1, 0)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=64 * wf, cout=256 * wf, cmid=64 * wf))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=256 * wf, cout=256 * wf, cmid=64 * wf))
                 for i in range(2, block_units[0] + 1)]))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=128 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=512 * wf, cout=512 * wf, cmid=128 * wf))
                 for i in range(2, block_units[1] + 1)]))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=256 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=1024 * wf, cout=1024 * wf, cmid=256 * wf))
                 for i in range(2, block_units[2] + 1)]))),
            ('block4', nn.Sequential(OrderedDict(
                [('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid=512 * wf, stride=2))] +
                [(f'unit{i:02d}', PreActBottleneck(cin=2048 * wf, cout=2048 * wf, cmid=512 * wf))
                 for i in range(2, block_units[3] + 1)]))),
        ]))
self.zero_head = zero_head
        self.head = nn.Sequential(OrderedDict([
            ('gn', nn.GroupNorm(32, 2048 * wf)),
            ('relu', nn.ReLU(inplace=True)),
            ('avg', nn.AdaptiveAvgPool2d(output_size=1)),
            ('conv', nn.Conv2d(2048 * wf, head_size, kernel_size=1, bias=True)),
        ]))
def forward(self, x):
x = self.head(self.body(self.root(x)))
return x
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
            self.root.conv.weight.copy_(
                tf2th(weights[f'{prefix}root_block/standardized_conv2d/kernel']))
            self.head.gn.weight.copy_(tf2th(weights[f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
                self.head.conv.weight.copy_(tf2th(weights[f'{prefix}head/conv2d/kernel']))
                self.head.conv.bias.copy_(tf2th(weights[f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block_units': [4, 4, 4, 4], 'width_factor': 4}]
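# A minimal driver sketch (hypothetical, not part of the original module)
# wiring the two helpers above together:
#
#   args, kwargs = get_init_inputs()
#   model = ResNetV2(*args, **kwargs)  # block_units=[4, 4, 4, 4], width_factor=4
#   logits = model(get_inputs()[0])    # -> torch.Size([4, 21843, 1, 1])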
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
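    # Layout shuffle for the 7x7 stem conv weight: reads the OIHW-contiguous
    # (256, 3, 7, 7) tensor and writes it with the input channel innermost
    # (channels-last style), which the standardization and conv kernels below
    # expect. (Descriptive note inferred from the index arithmetic.)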
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 2048
y1 = yindex // 2048
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 2048 * x2 + 18432 * y1), tmp0, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_6(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
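    # Weight standardization for the stem conv (mirrors StdConv2d.forward):
    # one program per output filter reduces its 147 = 3*7*7 weights to a
    # mean/variance, stores sqrt(var + 1e-10) in in_out_ptr0, and writes the
    # standardized weights (w - mean) / sqrt(var + eps) to out_ptr1.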
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-10
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_7(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
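    # nn.ConstantPad2d(1, 0) on the 32x32 stem output: produces a 34x34
    # channels-last map, with out-of-bounds taps masked to 0.0.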
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex // 8704 % 34
x1 = xindex // 256 % 34
x3 = xindex // 295936
x4 = xindex % 8704
x6 = xindex
tmp0 = -1 + x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x1
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-8448 + x4 + 8192 * x2 + 262144 * x3), tmp10,
other=0.0)
tl.store(out_ptr0 + x6, tmp11, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
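    # 3x3 / stride-2 max pool over the padded 34x34 map: nine loads feed a
    # running maximum (out_ptr0) while an int8 code records which of the nine
    # taps won (out_ptr1, the "indices" output kept for the backward pass).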
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 256
x1 = xindex // 256 % 16
x2 = xindex // 4096 % 16
x3 = xindex // 65536
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 17408 * x2 + 295936 * x3), None)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp5 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp7 = tl.load(in_ptr0 + (8960 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp9 = tl.load(in_ptr0 + (9216 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp11 = tl.load(in_ptr0 + (17408 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp13 = tl.load(in_ptr0 + (17664 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp15 = tl.load(in_ptr0 + (17920 + x0 + 512 * x1 + 17408 * x2 + 295936 *
x3), None)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, None)
tl.store(out_ptr1 + x4, tmp41, None)
@triton.jit
def triton_red_fused_native_group_norm_9(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
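    # GroupNorm statistics (32 groups): a blocked Welford reduction over the
    # 2048 elements of each (batch, group) slice, emitting the mean, the raw
    # M2 sum, and rsqrt(var + 1e-05) for the apply kernel below.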
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (
            triton_helpers.welford_reduce(tmp1, tmp2_mean, tmp2_m2,
                tmp2_weight, roffset == 0))
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
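# Per-filter standardization of a 1x1 conv weight: each 256-element filter is
# centered and divided by sqrt(var + 1e-10); the per-filter sigma lands in
# in_out_ptr0 and the standardized weight in out_ptr1. This matches the
# Weight Standardization scheme typically paired with GroupNorm.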
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
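# Same weight-standardization body as the kernel above, emitted again,
# presumably for a second weight tensor of the same shape.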
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_12(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
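# In-place bias add after a convolution: the 256-element bias in in_ptr0 is
# broadcast along the channel axis (x0 = xindex % 256).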
@triton.jit
def triton_poi_fused_convolution_13(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
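# In-place elementwise add, accumulating what appears to be the residual
# (skip-connection) tensor into the block output.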
@triton.jit
def triton_poi_fused_add_14(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 262144 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_16(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_18(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_20(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_21(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_22(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_convolution_23(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
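# Single-pass (persistent) GroupNorm statistics: RBLOCK=1024 covers the whole
# reduction at once, so mean, m2, and rsqrt(var + 1e-05) are produced without
# the Welford loop used by the _red_ variants above.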
@triton.jit
def triton_per_fused_native_group_norm_24(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_25(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_26(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_27(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_29(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 4096.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
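# Two-pass weight standardization for larger filters (rnumel=2048): the first
# loop computes the per-filter sigma = sqrt(var + 1e-10) via Welford, the
# second re-reads the weight and stores (w - mean) / sigma.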
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_poi_fused_add_31(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
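# Same two-pass weight standardization, specialized for the case where the
# x dimension needs no bound check (the x mask is constant-true).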
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_33(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_34(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_35(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_convolution_36(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_37(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = rindex // 32
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_38(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_39(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-10
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_40(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = rindex // 128
tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_42(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_poi_fused_add_44(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_45(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_46(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask)
@triton.jit
def triton_per_fused_native_group_norm_47(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = rindex // 64
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_48(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_convolution_49(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_50(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 64
r3 = rindex // 64
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 8192 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_51(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 8192
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 256.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_52(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
@triton.jit
def triton_poi_fused_add_53(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_out_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_per_fused_native_group_norm_54(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = rindex // 256
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_55(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 8192
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 256), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 256), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_56(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
            tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-10
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 8192 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 8192 * x0), tmp12, rmask)
@triton.jit
def triton_poi_fused_add_57(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.load(in_ptr0 + x0, None)
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x0, tmp2, None)
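# Final GroupNorm statistics: rsqrt(var + 1e-05) overwrites in_out_ptr0 in
# place while the per-group mean goes to out_ptr0.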
@triton.jit
def triton_per_fused_native_group_norm_58(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 256
r3 = rindex // 256
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 256 * x0 + 8192 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
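# Fused GroupNorm + ReLU + global average pool: the four loads at offsets 0,
# 8192, 16384, and 24576 are the four spatial positions of the 2x2 feature
# map, each normalized and activated individually, then averaged (divide by
# 4.0).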
@triton.jit
def triton_poi_fused_mean_native_group_norm_relu_59(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8192
x1 = xindex // 8192
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 32768 * x1), None)
tmp1 = tl.load(in_ptr1 + x2 // 256, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2 // 256, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (8192 + x0 + 32768 * x1), None)
tmp18 = tl.load(in_ptr0 + (16384 + x0 + 32768 * x1), None)
tmp25 = tl.load(in_ptr0 + (24576 + x0 + 32768 * x1), None)
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = tmp11 - tmp1
tmp13 = tmp12 * tmp3
tmp14 = tmp13 * tmp5
tmp15 = tmp14 + tmp7
tmp16 = triton_helpers.maximum(tmp9, tmp15)
tmp17 = tmp10 + tmp16
tmp19 = tmp18 - tmp1
tmp20 = tmp19 * tmp3
tmp21 = tmp20 * tmp5
tmp22 = tmp21 + tmp7
tmp23 = triton_helpers.maximum(tmp9, tmp22)
tmp24 = tmp17 + tmp23
tmp26 = tmp25 - tmp1
tmp27 = tmp26 * tmp3
tmp28 = tmp27 * tmp5
tmp29 = tmp28 + tmp7
tmp30 = triton_helpers.maximum(tmp9, tmp29)
tmp31 = tmp24 + tmp30
tmp32 = 4.0
tmp33 = tmp31 / tmp32
tl.store(out_ptr0 + x2, tmp33, None)
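# Bias add for what appears to be the classifier head: xnumel = 87372 =
# 4 * 21843, i.e. a batch of 4 logit vectors over 21843 output classes.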
@triton.jit
def triton_poi_fused_convolution_60(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 87372
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 21843
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
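# Entry point: unpacks the 170 flattened inputs (conv weights, GroupNorm
# affine parameters, biases, and the input batch), validates every shape and
# stride, then drives the kernel sequence defined above.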
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121, primals_122,
primals_123, primals_124, primals_125, primals_126, primals_127,
primals_128, primals_129, primals_130, primals_131, primals_132,
primals_133, primals_134, primals_135, primals_136, primals_137,
primals_138, primals_139, primals_140, primals_141, primals_142,
primals_143, primals_144, primals_145, primals_146, primals_147,
primals_148, primals_149, primals_150, primals_151, primals_152,
primals_153, primals_154, primals_155, primals_156, primals_157,
primals_158, primals_159, primals_160, primals_161, primals_162,
primals_163, primals_164, primals_165, primals_166, primals_167,
primals_168, primals_169, primals_170) = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_7, (256,), (1,))
assert_size_stride(primals_8, (256,), (1,))
assert_size_stride(primals_9, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256,), (1,))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_14, (1024,), (1,))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (256,), (1,))
assert_size_stride(primals_19, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_20, (256,), (1,))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256,), (1,))
assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_24, (1024,), (1,))
assert_size_stride(primals_25, (1024,), (1,))
assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_27, (256,), (1,))
assert_size_stride(primals_28, (256,), (1,))
assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (256,), (1,))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (256,), (1,))
assert_size_stride(primals_33, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_34, (1024,), (1,))
assert_size_stride(primals_35, (1024,), (1,))
assert_size_stride(primals_36, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256,), (1,))
assert_size_stride(primals_39, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_40, (256,), (1,))
assert_size_stride(primals_41, (256,), (1,))
assert_size_stride(primals_42, (256,), (1,))
assert_size_stride(primals_43, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_44, (1024,), (1,))
assert_size_stride(primals_45, (1024,), (1,))
assert_size_stride(primals_46, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_48, (512,), (1,))
assert_size_stride(primals_49, (512,), (1,))
assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_51, (512,), (1,))
assert_size_stride(primals_52, (512,), (1,))
assert_size_stride(primals_53, (512,), (1,))
assert_size_stride(primals_54, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_55, (2048,), (1,))
assert_size_stride(primals_56, (2048,), (1,))
assert_size_stride(primals_57, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_58, (512,), (1,))
assert_size_stride(primals_59, (512,), (1,))
assert_size_stride(primals_60, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_61, (512,), (1,))
assert_size_stride(primals_62, (512,), (1,))
assert_size_stride(primals_63, (512,), (1,))
assert_size_stride(primals_64, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_65, (2048,), (1,))
assert_size_stride(primals_66, (2048,), (1,))
assert_size_stride(primals_67, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_68, (512,), (1,))
assert_size_stride(primals_69, (512,), (1,))
assert_size_stride(primals_70, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_71, (512,), (1,))
assert_size_stride(primals_72, (512,), (1,))
assert_size_stride(primals_73, (512,), (1,))
assert_size_stride(primals_74, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_75, (2048,), (1,))
assert_size_stride(primals_76, (2048,), (1,))
assert_size_stride(primals_77, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_78, (512,), (1,))
assert_size_stride(primals_79, (512,), (1,))
assert_size_stride(primals_80, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_81, (512,), (1,))
assert_size_stride(primals_82, (512,), (1,))
assert_size_stride(primals_83, (512,), (1,))
assert_size_stride(primals_84, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_85, (2048,), (1,))
assert_size_stride(primals_86, (2048,), (1,))
assert_size_stride(primals_87, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_88, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_89, (1024,), (1,))
assert_size_stride(primals_90, (1024,), (1,))
assert_size_stride(primals_91, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_92, (1024,), (1,))
assert_size_stride(primals_93, (1024,), (1,))
assert_size_stride(primals_94, (1024,), (1,))
assert_size_stride(primals_95, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_96, (4096,), (1,))
assert_size_stride(primals_97, (4096,), (1,))
assert_size_stride(primals_98, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_99, (1024,), (1,))
assert_size_stride(primals_100, (1024,), (1,))
assert_size_stride(primals_101, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_102, (1024,), (1,))
assert_size_stride(primals_103, (1024,), (1,))
assert_size_stride(primals_104, (1024,), (1,))
assert_size_stride(primals_105, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_106, (4096,), (1,))
assert_size_stride(primals_107, (4096,), (1,))
assert_size_stride(primals_108, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_109, (1024,), (1,))
assert_size_stride(primals_110, (1024,), (1,))
assert_size_stride(primals_111, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_112, (1024,), (1,))
assert_size_stride(primals_113, (1024,), (1,))
assert_size_stride(primals_114, (1024,), (1,))
assert_size_stride(primals_115, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_116, (4096,), (1,))
assert_size_stride(primals_117, (4096,), (1,))
assert_size_stride(primals_118, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_119, (1024,), (1,))
assert_size_stride(primals_120, (1024,), (1,))
assert_size_stride(primals_121, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_122, (1024,), (1,))
assert_size_stride(primals_123, (1024,), (1,))
assert_size_stride(primals_124, (1024,), (1,))
assert_size_stride(primals_125, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_126, (4096,), (1,))
assert_size_stride(primals_127, (4096,), (1,))
assert_size_stride(primals_128, (8192, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_129, (2048, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_130, (2048,), (1,))
assert_size_stride(primals_131, (2048,), (1,))
assert_size_stride(primals_132, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_133, (2048,), (1,))
assert_size_stride(primals_134, (2048,), (1,))
assert_size_stride(primals_135, (2048,), (1,))
assert_size_stride(primals_136, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_137, (8192,), (1,))
assert_size_stride(primals_138, (8192,), (1,))
assert_size_stride(primals_139, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_140, (2048,), (1,))
assert_size_stride(primals_141, (2048,), (1,))
assert_size_stride(primals_142, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_143, (2048,), (1,))
assert_size_stride(primals_144, (2048,), (1,))
assert_size_stride(primals_145, (2048,), (1,))
assert_size_stride(primals_146, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_147, (8192,), (1,))
assert_size_stride(primals_148, (8192,), (1,))
assert_size_stride(primals_149, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_150, (2048,), (1,))
assert_size_stride(primals_151, (2048,), (1,))
assert_size_stride(primals_152, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_153, (2048,), (1,))
assert_size_stride(primals_154, (2048,), (1,))
assert_size_stride(primals_155, (2048,), (1,))
assert_size_stride(primals_156, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_157, (8192,), (1,))
assert_size_stride(primals_158, (8192,), (1,))
assert_size_stride(primals_159, (2048, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_160, (2048,), (1,))
assert_size_stride(primals_161, (2048,), (1,))
assert_size_stride(primals_162, (2048, 2048, 3, 3), (18432, 9, 3, 1))
assert_size_stride(primals_163, (2048,), (1,))
assert_size_stride(primals_164, (2048,), (1,))
assert_size_stride(primals_165, (2048,), (1,))
assert_size_stride(primals_166, (8192, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_167, (8192,), (1,))
assert_size_stride(primals_168, (8192,), (1,))
assert_size_stride(primals_169, (21843, 8192, 1, 1), (8192, 1, 1, 1))
assert_size_stride(primals_170, (21843,), (1,))
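    # Editorial note: primals_1..primals_170 appear to be the network
    # parameters (conv weights, biases, group-norm affines) plus the
    # (4, 3, 64, 64) input batch in primals_2; the asserts above only
    # validate shapes and strides before any kernel runs.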
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
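        # The triton_poi_fused_0..5 copies below appear to repack the input
        # and the 3x3 conv weights into channels-last strides, the layout the
        # extern convolution calls later in this function consume.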
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_9, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_9
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_19, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_19
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_29
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_39, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_39
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_50
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_60, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_60
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_70, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_70
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_80, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_80
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_91, buf10, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_91
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_101, buf11, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_101
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_111, buf12, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_111
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_121, buf13, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_121
buf14 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_132, buf14, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_132
buf15 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_142, buf15, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_142
buf16 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_152, buf16, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_152
buf17 = empty_strided_cuda((2048, 2048, 3, 3), (18432, 1, 6144,
2048), torch.float32)
triton_poi_fused_5[grid(4194304, 9)](primals_162, buf17, 4194304, 9,
XBLOCK=16, YBLOCK=128, num_warps=8, num_stages=1)
del primals_162
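        # The fused var_mean/sqrt/div/sub kernels that follow look like
        # Weight Standardization: each filter is normalized by its own mean
        # and standard deviation before being handed to the convolution.
        # Together with the GroupNorm kernels below, this suggests a
        # BiT-style ResNet.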
buf19 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf21 = reinterpret_tensor(buf19, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf19
buf22 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
triton_per_fused_add_div_sqrt_sub_var_mean_6[grid(256)](buf21, buf0,
buf22, 256, 147, XBLOCK=1, num_warps=2, num_stages=1)
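        # Stem: 7x7 stride-2 convolution with the standardized weights
        # (64x64 -> 32x32), then explicit padding to 34x34 and a 3x3
        # stride-2 max-pool down to 16x16.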
buf23 = extern_kernels.convolution(buf1, buf22, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf23, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf24 = empty_strided_cuda((4, 256, 34, 34), (295936, 1, 8704, 256),
torch.float32)
triton_poi_fused_constant_pad_nd_7[grid(1183744)](buf23, buf24,
1183744, XBLOCK=512, num_warps=8, num_stages=1)
buf25 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
buf26 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(262144)](buf24,
buf25, buf26, 262144, XBLOCK=512, num_warps=8, num_stages=1)
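        # Recurring pattern from here on: a triton_red_fused_native_group_norm_*
        # reduction computes per-group statistics (32 groups), then a
        # triton_poi_fused_native_group_norm_relu_* kernel applies the affine
        # transform and ReLU in a single pointwise pass.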
buf27 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf28 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf30 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf25, buf27, buf28,
buf30, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf31 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf25,
buf27, buf28, primals_3, primals_4, buf31, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_4
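        # First bottleneck block: a 1x1 projection shortcut to 1024 channels
        # (primals_5 -> buf37) runs alongside the 1x1 -> 3x3 -> 1x1 residual
        # path (primals_6, primals_9, primals_13).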
buf33 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf35 = reinterpret_tensor(buf33, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf33
buf36 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf35,
primals_5, buf36, 1024, 256, num_warps=2, num_stages=1)
buf37 = extern_kernels.convolution(buf31, buf36, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf37, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf39 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf41 = reinterpret_tensor(buf39, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf39
buf42 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_12[grid(256)](buf41,
primals_6, buf42, 256, 256, num_warps=2, num_stages=1)
buf43 = extern_kernels.convolution(buf31, buf42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf44 = buf28
del buf28
buf45 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf47 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf43, buf44, buf45,
buf47, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf48 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf43,
buf44, buf45, primals_7, primals_8, buf48, 262144, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_8
buf49 = extern_kernels.convolution(buf48, buf2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf49, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf50 = buf49
del buf49
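        # triton_poi_fused_convolution_13 appears to add the 3x3 conv bias
        # (primals_10, a (256,) tensor) to buf50 in place.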
triton_poi_fused_convolution_13[grid(262144)](buf50, primals_10,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_10
buf51 = buf45
del buf45
buf52 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf54 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf50, buf51, buf52,
buf54, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf55 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf50,
buf51, buf52, primals_11, primals_12, buf55, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_12
buf57 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf59 = reinterpret_tensor(buf57, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf57
buf60 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf59,
primals_13, buf60, 1024, 256, num_warps=2, num_stages=1)
buf61 = extern_kernels.convolution(buf55, buf60, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf61, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf62 = buf37
del buf37
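        # Residual join: triton_poi_fused_add_14 accumulates the bottleneck
        # output (buf61) into the projection shortcut (buf62) in place.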
triton_poi_fused_add_14[grid(1048576)](buf62, buf61, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf63 = buf52
del buf52
buf64 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_15[grid(128)](buf62, buf63,
buf64, buf66, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf67 = buf61
del buf61
triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf62,
buf63, buf64, primals_14, primals_15, buf67, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_15
buf69 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf71 = reinterpret_tensor(buf69, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf69
buf72 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf71,
primals_16, buf72, 256, 1024, num_warps=8, num_stages=1)
buf73 = extern_kernels.convolution(buf67, buf72, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf73, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf74 = buf64
del buf64
buf75 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf77 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf73, buf74, buf75,
buf77, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf78 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf73,
buf74, buf75, primals_17, primals_18, buf78, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_18
buf79 = extern_kernels.convolution(buf78, buf3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf79, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf80 = buf79
del buf79
triton_poi_fused_convolution_13[grid(262144)](buf80, primals_20,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_20
buf81 = buf75
del buf75
buf82 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf84 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_9[grid(128)](buf80, buf81, buf82,
buf84, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf80,
buf81, buf82, primals_21, primals_22, buf85, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_22
buf87 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf89 = reinterpret_tensor(buf87, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf87
buf90 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf89,
primals_23, buf90, 1024, 256, num_warps=2, num_stages=1)
buf91 = extern_kernels.convolution(buf85, buf90, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf91, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf92 = buf91
del buf91
triton_poi_fused_add_18[grid(1048576)](buf92, buf62, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf93 = buf82
del buf82
buf94 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf96 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_15[grid(128)](buf92, buf93,
buf94, buf96, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf97 = reinterpret_tensor(buf23, (4, 1024, 16, 16), (262144, 1,
16384, 1024), 0)
del buf23
triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf92,
buf93, buf94, primals_24, primals_25, buf97, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_25
buf99 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf101 = reinterpret_tensor(buf99, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf99
buf102 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf101,
primals_26, buf102, 256, 1024, num_warps=8, num_stages=1)
buf103 = extern_kernels.convolution(buf97, buf102, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf103, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf104 = buf94
del buf94
buf105 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf107 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf103, buf104,
buf105, buf107, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf108 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf103,
buf104, buf105, primals_27, primals_28, buf108, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_28
buf109 = extern_kernels.convolution(buf108, buf4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf110 = buf109
del buf109
triton_poi_fused_convolution_13[grid(262144)](buf110, primals_30,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_30
buf111 = buf105
del buf105
buf112 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf114 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf110, buf111,
buf112, buf114, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf115 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf110,
buf111, buf112, primals_31, primals_32, buf115, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_32
buf117 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf119 = reinterpret_tensor(buf117, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf117
buf120 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf119,
primals_33, buf120, 1024, 256, num_warps=2, num_stages=1)
buf121 = extern_kernels.convolution(buf115, buf120, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf122 = buf121
del buf121
triton_poi_fused_add_18[grid(1048576)](buf122, buf92, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf123 = buf112
del buf112
buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf126 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf122, buf123,
buf124, buf126, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf127 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.float32)
triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf122,
buf123, buf124, primals_34, primals_35, buf127, 1048576, XBLOCK
=1024, num_warps=4, num_stages=1)
del primals_35
buf129 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf131 = reinterpret_tensor(buf129, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf129
buf132 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf131,
primals_36, buf132, 256, 1024, num_warps=8, num_stages=1)
buf133 = extern_kernels.convolution(buf127, buf132, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf133, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf134 = buf124
del buf124
buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf137 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf133, buf134,
buf135, buf137, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf138 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf133,
buf134, buf135, primals_37, primals_38, buf138, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_38
buf139 = extern_kernels.convolution(buf138, buf5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf139, (4, 256, 16, 16), (65536, 1, 4096, 256))
buf140 = buf139
del buf139
triton_poi_fused_convolution_13[grid(262144)](buf140, primals_40,
262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_40
buf141 = buf135
del buf135
buf142 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_9[grid(128)](buf140, buf141,
buf142, buf144, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf145 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_10[grid(262144)](buf140,
buf141, buf142, primals_41, primals_42, buf145, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_42
buf147 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf149 = reinterpret_tensor(buf147, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf147
buf150 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(1024)](buf149,
primals_43, buf150, 1024, 256, num_warps=2, num_stages=1)
buf151 = extern_kernels.convolution(buf145, buf150, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf151, (4, 1024, 16, 16), (262144, 1, 16384, 1024))
buf152 = buf151
del buf151
triton_poi_fused_add_18[grid(1048576)](buf152, buf122, 1048576,
XBLOCK=512, num_warps=8, num_stages=1)
buf153 = buf142
del buf142
buf154 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf156 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf152, buf153,
buf154, buf156, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf157 = empty_strided_cuda((4, 1024, 16, 16), (262144, 1, 16384,
1024), torch.float32)
triton_poi_fused_native_group_norm_relu_16[grid(1048576)](buf152,
buf153, buf154, primals_44, primals_45, buf157, 1048576, XBLOCK
=1024, num_warps=4, num_stages=1)
del primals_45
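        # Next stage: channels widen to 512/2048 and the spatial size halves
        # (16x16 -> 8x8). The shortcut is a stride-2 1x1 projection
        # (primals_46) and the first 3x3 conv (primals_50) also uses stride 2.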
buf159 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf161 = reinterpret_tensor(buf159, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf159
buf162 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf161,
primals_46, buf162, 2048, 1024, num_warps=8, num_stages=1)
buf163 = extern_kernels.convolution(buf157, buf162, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf163, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf165 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf167 = reinterpret_tensor(buf165, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf165
buf168 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_20[grid(512)](buf167,
primals_47, buf168, 512, 1024, num_warps=8, num_stages=1)
buf169 = extern_kernels.convolution(buf157, buf168, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf169, (4, 512, 16, 16), (131072, 1, 8192, 512))
buf170 = buf154
del buf154
buf171 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf173 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_21[grid(128)](buf169, buf170,
buf171, buf173, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf174 = empty_strided_cuda((4, 512, 16, 16), (131072, 1, 8192, 512
), torch.float32)
triton_poi_fused_native_group_norm_relu_22[grid(524288)](buf169,
buf170, buf171, primals_48, primals_49, buf174, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_49
buf175 = extern_kernels.convolution(buf174, buf6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf176 = buf175
del buf175
triton_poi_fused_convolution_23[grid(131072)](buf176, primals_51,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_51
buf177 = buf171
del buf171
buf178 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf180 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf176, buf177,
buf178, buf180, 128, 1024, num_warps=8, num_stages=1)
buf181 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf176,
buf177, buf178, primals_52, primals_53, buf181, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_53
buf183 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf185 = reinterpret_tensor(buf183, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf183
buf186 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf185,
primals_54, buf186, 2048, 512, num_warps=4, num_stages=1)
buf187 = extern_kernels.convolution(buf181, buf186, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf187, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf188 = buf163
del buf163
triton_poi_fused_add_27[grid(524288)](buf188, buf187, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf189 = buf178
del buf178
buf190 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf192 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf188, buf189,
buf190, buf192, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf193 = buf187
del buf187
triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf188,
buf189, buf190, primals_55, primals_56, buf193, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_56
buf195 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf197 = reinterpret_tensor(buf195, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf195
buf198 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf197,
primals_57, buf198, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf199 = extern_kernels.convolution(buf193, buf198, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf199, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf200 = buf190
del buf190
buf201 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf203 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf199, buf200,
buf201, buf203, 128, 1024, num_warps=8, num_stages=1)
buf204 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf199,
buf200, buf201, primals_58, primals_59, buf204, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_59
buf205 = extern_kernels.convolution(buf204, buf7, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf205, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf206 = buf205
del buf205
triton_poi_fused_convolution_23[grid(131072)](buf206, primals_61,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_61
buf207 = buf201
del buf201
buf208 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf210 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf206, buf207,
buf208, buf210, 128, 1024, num_warps=8, num_stages=1)
buf211 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf206,
buf207, buf208, primals_62, primals_63, buf211, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_63
buf213 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf215 = reinterpret_tensor(buf213, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf213
buf216 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf215,
primals_64, buf216, 2048, 512, num_warps=4, num_stages=1)
buf217 = extern_kernels.convolution(buf211, buf216, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf217, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf218 = buf217
del buf217
triton_poi_fused_add_31[grid(524288)](buf218, buf188, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf219 = buf208
del buf208
buf220 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf222 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf218, buf219,
buf220, buf222, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf223 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf218,
buf219, buf220, primals_65, primals_66, buf223, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_66
buf225 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf227 = reinterpret_tensor(buf225, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf225
buf228 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf227,
primals_67, buf228, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf229 = extern_kernels.convolution(buf223, buf228, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf229, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf230 = buf220
del buf220
buf231 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf233 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf229, buf230,
buf231, buf233, 128, 1024, num_warps=8, num_stages=1)
buf234 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf229,
buf230, buf231, primals_68, primals_69, buf234, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_69
buf235 = extern_kernels.convolution(buf234, buf8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf235, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf236 = buf235
del buf235
triton_poi_fused_convolution_23[grid(131072)](buf236, primals_71,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_71
buf237 = buf231
del buf231
buf238 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf240 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf236, buf237,
buf238, buf240, 128, 1024, num_warps=8, num_stages=1)
buf241 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf236,
buf237, buf238, primals_72, primals_73, buf241, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_73
buf243 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf245 = reinterpret_tensor(buf243, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf243
buf246 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf245,
primals_74, buf246, 2048, 512, num_warps=4, num_stages=1)
buf247 = extern_kernels.convolution(buf241, buf246, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf247, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf248 = buf247
del buf247
triton_poi_fused_add_31[grid(524288)](buf248, buf218, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf249 = buf238
del buf238
buf250 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf252 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf248, buf249,
buf250, buf252, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf253 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf248,
buf249, buf250, primals_75, primals_76, buf253, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_76
buf255 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf257 = reinterpret_tensor(buf255, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf255
buf258 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf257,
primals_77, buf258, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf259 = extern_kernels.convolution(buf253, buf258, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf259, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf260 = buf250
del buf250
buf261 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf263 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf259, buf260,
buf261, buf263, 128, 1024, num_warps=8, num_stages=1)
buf264 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf259,
buf260, buf261, primals_78, primals_79, buf264, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_79
buf265 = extern_kernels.convolution(buf264, buf9, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf265, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf266 = buf265
del buf265
triton_poi_fused_convolution_23[grid(131072)](buf266, primals_81,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_81
buf267 = buf261
del buf261
buf268 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf270 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_24[grid(128)](buf266, buf267,
buf268, buf270, 128, 1024, num_warps=8, num_stages=1)
buf271 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_25[grid(131072)](buf266,
buf267, buf268, primals_82, primals_83, buf271, 131072, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_83
buf273 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf275 = reinterpret_tensor(buf273, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf273
buf276 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_26[grid(2048)](buf275,
primals_84, buf276, 2048, 512, num_warps=4, num_stages=1)
buf277 = extern_kernels.convolution(buf271, buf276, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf277, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf278 = buf277
del buf277
triton_poi_fused_add_31[grid(524288)](buf278, buf248, 524288,
XBLOCK=1024, num_warps=4, num_stages=1)
buf279 = buf268
del buf268
buf280 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf282 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf278, buf279,
buf280, buf282, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf283 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_native_group_norm_relu_29[grid(524288)](buf278,
buf279, buf280, primals_85, primals_86, buf283, 524288, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_86
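        # Next stage: 1024/4096 channels at 4x4 resolution, same block
        # structure (primals_87 stride-2 shortcut, primals_88 1x1 reduce,
        # primals_91 stride-2 3x3 conv).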
buf285 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf287 = reinterpret_tensor(buf285, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf285
buf288 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf287,
primals_87, buf288, 4096, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf289 = extern_kernels.convolution(buf283, buf288, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf289, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf291 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf293 = reinterpret_tensor(buf291, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf291
buf294 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_33[grid(1024)](buf293,
primals_88, buf294, 1024, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf295 = extern_kernels.convolution(buf283, buf294, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf295, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf296 = buf280
del buf280
buf297 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf299 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_34[grid(128)](buf295, buf296,
buf297, buf299, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf300 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_35[grid(262144)](buf295,
buf296, buf297, primals_89, primals_90, buf300, 262144, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_90
buf301 = extern_kernels.convolution(buf300, buf10, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf301, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf302 = buf301
del buf301
triton_poi_fused_convolution_36[grid(65536)](buf302, primals_92,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_92
buf303 = buf297
del buf297
buf304 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf306 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf302, buf303,
buf304, buf306, 128, 512, num_warps=4, num_stages=1)
buf307 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf302,
buf303, buf304, primals_93, primals_94, buf307, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_94
buf309 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf311 = reinterpret_tensor(buf309, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf309
buf312 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf311,
primals_95, buf312, 4096, 1024, num_warps=8, num_stages=1)
buf313 = extern_kernels.convolution(buf307, buf312, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf313, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf314 = buf289
del buf289
triton_poi_fused_add_40[grid(262144)](buf314, buf313, 262144,
XBLOCK=1024, num_warps=4, num_stages=1)
buf315 = buf304
del buf304
buf316 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf318 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf314, buf315,
buf316, buf318, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf319 = buf313
del buf313
triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf314,
buf315, buf316, primals_96, primals_97, buf319, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_97
buf321 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf323 = reinterpret_tensor(buf321, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf321
buf324 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf323,
primals_98, buf324, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf325 = extern_kernels.convolution(buf319, buf324, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf325, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf326 = buf316
del buf316
buf327 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf329 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf325, buf326,
buf327, buf329, 128, 512, num_warps=4, num_stages=1)
buf330 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf325,
buf326, buf327, primals_99, primals_100, buf330, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_100
buf331 = extern_kernels.convolution(buf330, buf11, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf331, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf332 = buf331
del buf331
triton_poi_fused_convolution_36[grid(65536)](buf332, primals_102,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_102
buf333 = buf327
del buf327
buf334 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf336 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf332, buf333,
buf334, buf336, 128, 512, num_warps=4, num_stages=1)
buf337 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf332,
buf333, buf334, primals_103, primals_104, buf337, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_104
buf339 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf341 = reinterpret_tensor(buf339, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf339
buf342 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf341,
primals_105, buf342, 4096, 1024, num_warps=8, num_stages=1)
buf343 = extern_kernels.convolution(buf337, buf342, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf343, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf344 = buf343
del buf343
triton_poi_fused_add_44[grid(262144)](buf344, buf314, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
buf345 = buf334
del buf334
buf346 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf348 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf344, buf345,
buf346, buf348, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf349 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf344,
buf345, buf346, primals_106, primals_107, buf349, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_107
buf351 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf353 = reinterpret_tensor(buf351, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf351
buf354 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf353,
primals_108, buf354, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf355 = extern_kernels.convolution(buf349, buf354, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf355, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf356 = buf346
del buf346
buf357 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf359 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf355, buf356,
buf357, buf359, 128, 512, num_warps=4, num_stages=1)
buf360 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf355,
buf356, buf357, primals_109, primals_110, buf360, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_110
buf361 = extern_kernels.convolution(buf360, buf12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf361, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf362 = buf361
del buf361
triton_poi_fused_convolution_36[grid(65536)](buf362, primals_112,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_112
buf363 = buf357
del buf357
buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf362, buf363,
buf364, buf366, 128, 512, num_warps=4, num_stages=1)
buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf362,
buf363, buf364, primals_113, primals_114, buf367, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_114
buf369 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf371 = reinterpret_tensor(buf369, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf369
buf372 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf371,
primals_115, buf372, 4096, 1024, num_warps=8, num_stages=1)
buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf373, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf374 = buf373
del buf373
triton_poi_fused_add_44[grid(262144)](buf374, buf344, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
buf375 = buf364
del buf364
buf376 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf378 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf374, buf375,
buf376, buf378, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf379 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf374,
buf375, buf376, primals_116, primals_117, buf379, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_117
buf381 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf383 = reinterpret_tensor(buf381, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf381
buf384 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf383,
primals_118, buf384, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf385 = extern_kernels.convolution(buf379, buf384, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf385, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf386 = buf376
del buf376
buf387 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf389 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf385, buf386,
buf387, buf389, 128, 512, num_warps=4, num_stages=1)
buf390 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf385,
buf386, buf387, primals_119, primals_120, buf390, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_120
buf391 = extern_kernels.convolution(buf390, buf13, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf391, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf392 = buf391
del buf391
triton_poi_fused_convolution_36[grid(65536)](buf392, primals_122,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_122
buf393 = buf387
del buf387
buf394 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf396 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_37[grid(128)](buf392, buf393,
buf394, buf396, 128, 512, num_warps=4, num_stages=1)
buf397 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_38[grid(65536)](buf392,
buf393, buf394, primals_123, primals_124, buf397, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_124
buf399 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf401 = reinterpret_tensor(buf399, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf399
buf402 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_39[grid(4096)](buf401,
primals_125, buf402, 4096, 1024, num_warps=8, num_stages=1)
buf403 = extern_kernels.convolution(buf397, buf402, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf403, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf404 = buf403
del buf403
triton_poi_fused_add_44[grid(262144)](buf404, buf374, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
buf405 = buf394
del buf394
buf406 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf404, buf405,
buf406, buf408, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf409 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_native_group_norm_relu_42[grid(262144)](buf404,
buf405, buf406, primals_126, primals_127, buf409, 262144,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_127
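        # Final stage: 2048/8192 channels at 2x2 resolution (primals_128
        # stride-2 shortcut, primals_132 stride-2 3x3 conv). The 21843-way
        # classifier weights asserted above (primals_169/170) are presumably
        # applied after these blocks.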
buf411 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf413 = reinterpret_tensor(buf411, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf411
buf414 = empty_strided_cuda((8192, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_45[grid(8192)](buf413,
primals_128, buf414, 8192, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf415 = extern_kernels.convolution(buf409, buf414, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf415, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf417 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf419 = reinterpret_tensor(buf417, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf417
buf420 = empty_strided_cuda((2048, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_46[grid(2048)](buf419,
primals_129, buf420, 2048, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf421 = extern_kernels.convolution(buf409, buf420, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf421, (4, 2048, 4, 4), (32768, 1, 8192, 2048))
buf422 = buf406
del buf406
buf423 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf425 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_47[grid(128)](buf421, buf422,
buf423, buf425, 128, 1024, num_warps=8, num_stages=1)
buf426 = empty_strided_cuda((4, 2048, 4, 4), (32768, 1, 8192, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_48[grid(131072)](buf421,
buf422, buf423, primals_130, primals_131, buf426, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_131
buf427 = extern_kernels.convolution(buf426, buf14, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf427, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf428 = buf427
del buf427
triton_poi_fused_convolution_49[grid(32768)](buf428, primals_133,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_133
buf429 = buf423
del buf423
buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf428, buf429,
buf430, buf432, 128, 256, num_warps=2, num_stages=1)
buf433 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf428,
buf429, buf430, primals_134, primals_135, buf433, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_135
buf435 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf437 = reinterpret_tensor(buf435, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf435
buf438 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf437,
primals_136, buf438, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf439, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf440 = buf415
del buf415
triton_poi_fused_add_53[grid(131072)](buf440, buf439, 131072,
XBLOCK=1024, num_warps=4, num_stages=1)
buf441 = buf430
del buf430
buf442 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf444 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_54[grid(128)](buf440, buf441,
buf442, buf444, 128, 1024, num_warps=8, num_stages=1)
buf445 = buf439
del buf439
triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf440,
buf441, buf442, primals_137, primals_138, buf445, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_138
buf447 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf449 = reinterpret_tensor(buf447, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf447
buf450 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192,
8192), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf449,
primals_139, buf450, 2048, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf451 = extern_kernels.convolution(buf445, buf450, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf451, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf452 = buf442
del buf442
buf453 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf455 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf451, buf452,
buf453, buf455, 128, 256, num_warps=2, num_stages=1)
buf456 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf451,
buf452, buf453, primals_140, primals_141, buf456, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_141
buf457 = extern_kernels.convolution(buf456, buf15, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf457, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf458 = buf457
del buf457
triton_poi_fused_convolution_49[grid(32768)](buf458, primals_143,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_143
buf459 = buf453
del buf453
buf460 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf462 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf458, buf459,
buf460, buf462, 128, 256, num_warps=2, num_stages=1)
buf463 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf458,
buf459, buf460, primals_144, primals_145, buf463, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_145
buf465 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf467 = reinterpret_tensor(buf465, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf465
buf468 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf467,
primals_146, buf468, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf469 = extern_kernels.convolution(buf463, buf468, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf469, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf470 = buf469
del buf469
triton_poi_fused_add_57[grid(131072)](buf470, buf440, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf471 = buf460
del buf460
buf472 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf474 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_54[grid(128)](buf470, buf471,
buf472, buf474, 128, 1024, num_warps=8, num_stages=1)
buf475 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192
), torch.float32)
triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf470,
buf471, buf472, primals_147, primals_148, buf475, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_148
buf477 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf479 = reinterpret_tensor(buf477, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf477
buf480 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192,
8192), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf479,
primals_149, buf480, 2048, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf481 = extern_kernels.convolution(buf475, buf480, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf481, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf482 = buf472
del buf472
buf483 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf485 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf481, buf482,
buf483, buf485, 128, 256, num_warps=2, num_stages=1)
buf486 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf481,
buf482, buf483, primals_150, primals_151, buf486, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_151
buf487 = extern_kernels.convolution(buf486, buf16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf487, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf488 = buf487
del buf487
triton_poi_fused_convolution_49[grid(32768)](buf488, primals_153,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_153
buf489 = buf483
del buf483
buf490 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf492 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf488, buf489,
buf490, buf492, 128, 256, num_warps=2, num_stages=1)
buf493 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf488,
buf489, buf490, primals_154, primals_155, buf493, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_155
buf495 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf497 = reinterpret_tensor(buf495, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf495
buf498 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf497,
primals_156, buf498, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf499 = extern_kernels.convolution(buf493, buf498, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf499, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf500 = buf499
del buf499
triton_poi_fused_add_57[grid(131072)](buf500, buf470, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf501 = buf490
del buf490
buf502 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf504 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_54[grid(128)](buf500, buf501,
buf502, buf504, 128, 1024, num_warps=8, num_stages=1)
buf505 = empty_strided_cuda((4, 8192, 2, 2), (32768, 1, 16384, 8192
), torch.float32)
triton_poi_fused_native_group_norm_relu_55[grid(131072)](buf500,
buf501, buf502, primals_157, primals_158, buf505, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_158
buf507 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf509 = reinterpret_tensor(buf507, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf507
buf510 = empty_strided_cuda((2048, 8192, 1, 1), (8192, 1, 8192,
8192), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_56[grid(2048)](buf509,
primals_159, buf510, 2048, 8192, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf511 = extern_kernels.convolution(buf505, buf510, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf511, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf512 = buf502
del buf502
buf513 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf515 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf511, buf512,
buf513, buf515, 128, 256, num_warps=2, num_stages=1)
buf516 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf511,
buf512, buf513, primals_160, primals_161, buf516, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_161
buf517 = extern_kernels.convolution(buf516, buf17, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf517, (4, 2048, 2, 2), (8192, 1, 4096, 2048))
buf518 = buf517
del buf517
triton_poi_fused_convolution_49[grid(32768)](buf518, primals_163,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_163
buf519 = buf513
del buf513
buf520 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf522 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_50[grid(128)](buf518, buf519,
buf520, buf522, 128, 256, num_warps=2, num_stages=1)
buf523 = empty_strided_cuda((4, 2048, 2, 2), (8192, 1, 4096, 2048),
torch.float32)
triton_poi_fused_native_group_norm_relu_51[grid(32768)](buf518,
buf519, buf520, primals_164, primals_165, buf523, 32768, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_165
buf525 = empty_strided_cuda((8192, 1, 1, 1), (1, 8192, 8192, 8192),
torch.float32)
buf527 = reinterpret_tensor(buf525, (8192, 1, 1, 1), (1, 1, 1, 1), 0)
del buf525
buf528 = empty_strided_cuda((8192, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_52[grid(8192)](buf527,
primals_166, buf528, 8192, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf529 = extern_kernels.convolution(buf523, buf528, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf529, (4, 8192, 2, 2), (32768, 1, 16384, 8192))
buf530 = buf529
del buf529
triton_poi_fused_add_57[grid(131072)](buf530, buf500, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf531 = reinterpret_tensor(buf520, (4, 32, 1, 1), (32, 1, 32, 32), 0)
del buf520
buf532 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf534 = reinterpret_tensor(buf532, (4, 32, 1, 1), (32, 1, 32, 32), 0)
del buf532
triton_per_fused_native_group_norm_58[grid(128)](buf534, buf530,
buf531, 128, 1024, num_warps=8, num_stages=1)
buf535 = empty_strided_cuda((4, 8192, 1, 1), (8192, 1, 8192, 8192),
torch.float32)
triton_poi_fused_mean_native_group_norm_relu_59[grid(32768)](buf530,
buf531, buf534, primals_167, primals_168, buf535, 32768, XBLOCK
=128, num_warps=4, num_stages=1)
buf536 = extern_kernels.convolution(buf535, primals_169, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf536, (4, 21843, 1, 1), (21843, 1, 21843, 21843))
buf537 = reinterpret_tensor(buf536, (4, 21843, 1, 1), (21843, 1, 1,
1), 0)
del buf536
triton_poi_fused_convolution_60[grid(87372)](buf537, primals_170,
87372, XBLOCK=512, num_warps=8, num_stages=1)
del primals_170
return (buf537, buf0, buf1, primals_3, primals_5, primals_6, primals_7,
buf2, primals_11, primals_13, primals_14, primals_16, primals_17,
buf3, primals_21, primals_23, primals_24, primals_26, primals_27,
buf4, primals_31, primals_33, primals_34, primals_36, primals_37,
buf5, primals_41, primals_43, primals_44, primals_46, primals_47,
primals_48, buf6, primals_52, primals_54, primals_55, primals_57,
primals_58, buf7, primals_62, primals_64, primals_65, primals_67,
primals_68, buf8, primals_72, primals_74, primals_75, primals_77,
primals_78, buf9, primals_82, primals_84, primals_85, primals_87,
primals_88, primals_89, buf10, primals_93, primals_95, primals_96,
primals_98, primals_99, buf11, primals_103, primals_105,
primals_106, primals_108, primals_109, buf12, primals_113,
primals_115, primals_116, primals_118, primals_119, buf13,
primals_123, primals_125, primals_126, primals_128, primals_129,
primals_130, buf14, primals_134, primals_136, primals_137,
primals_139, primals_140, buf15, primals_144, primals_146,
primals_147, primals_149, primals_150, buf16, primals_154,
primals_156, primals_157, primals_159, primals_160, buf17,
primals_164, primals_166, primals_167, primals_168, primals_169,
buf21, buf22, buf24, buf25, buf26, reinterpret_tensor(buf27, (4, 32
), (32, 1), 0), reinterpret_tensor(buf30, (4, 32), (32, 1), 0),
buf31, buf35, buf36, buf41, buf42, buf43, reinterpret_tensor(buf44,
(4, 32), (32, 1), 0), reinterpret_tensor(buf47, (4, 32), (32, 1), 0
), buf48, buf50, reinterpret_tensor(buf51, (4, 32), (32, 1), 0),
reinterpret_tensor(buf54, (4, 32), (32, 1), 0), buf55, buf59, buf60,
buf62, reinterpret_tensor(buf63, (4, 32), (32, 1), 0),
reinterpret_tensor(buf66, (4, 32), (32, 1), 0), buf67, buf71, buf72,
buf73, reinterpret_tensor(buf74, (4, 32), (32, 1), 0),
reinterpret_tensor(buf77, (4, 32), (32, 1), 0), buf78, buf80,
reinterpret_tensor(buf81, (4, 32), (32, 1), 0), reinterpret_tensor(
buf84, (4, 32), (32, 1), 0), buf85, buf89, buf90, buf92,
reinterpret_tensor(buf93, (4, 32), (32, 1), 0), reinterpret_tensor(
buf96, (4, 32), (32, 1), 0), buf97, buf101, buf102, buf103,
reinterpret_tensor(buf104, (4, 32), (32, 1), 0), reinterpret_tensor
(buf107, (4, 32), (32, 1), 0), buf108, buf110, reinterpret_tensor(
buf111, (4, 32), (32, 1), 0), reinterpret_tensor(buf114, (4, 32), (
32, 1), 0), buf115, buf119, buf120, buf122, reinterpret_tensor(
buf123, (4, 32), (32, 1), 0), reinterpret_tensor(buf126, (4, 32), (
32, 1), 0), buf127, buf131, buf132, buf133, reinterpret_tensor(
buf134, (4, 32), (32, 1), 0), reinterpret_tensor(buf137, (4, 32), (
32, 1), 0), buf138, buf140, reinterpret_tensor(buf141, (4, 32), (32,
1), 0), reinterpret_tensor(buf144, (4, 32), (32, 1), 0), buf145,
buf149, buf150, buf152, reinterpret_tensor(buf153, (4, 32), (32, 1),
0), reinterpret_tensor(buf156, (4, 32), (32, 1), 0), buf157, buf161,
buf162, buf167, buf168, buf169, reinterpret_tensor(buf170, (4, 32),
(32, 1), 0), reinterpret_tensor(buf173, (4, 32), (32, 1), 0),
buf174, buf176, reinterpret_tensor(buf177, (4, 32), (32, 1), 0),
reinterpret_tensor(buf180, (4, 32), (32, 1), 0), buf181, buf185,
buf186, buf188, reinterpret_tensor(buf189, (4, 32), (32, 1), 0),
reinterpret_tensor(buf192, (4, 32), (32, 1), 0), buf193, buf197,
buf198, buf199, reinterpret_tensor(buf200, (4, 32), (32, 1), 0),
reinterpret_tensor(buf203, (4, 32), (32, 1), 0), buf204, buf206,
reinterpret_tensor(buf207, (4, 32), (32, 1), 0), reinterpret_tensor
(buf210, (4, 32), (32, 1), 0), buf211, buf215, buf216, buf218,
reinterpret_tensor(buf219, (4, 32), (32, 1), 0), reinterpret_tensor
(buf222, (4, 32), (32, 1), 0), buf223, buf227, buf228, buf229,
reinterpret_tensor(buf230, (4, 32), (32, 1), 0), reinterpret_tensor
(buf233, (4, 32), (32, 1), 0), buf234, buf236, reinterpret_tensor(
buf237, (4, 32), (32, 1), 0), reinterpret_tensor(buf240, (4, 32), (
32, 1), 0), buf241, buf245, buf246, buf248, reinterpret_tensor(
buf249, (4, 32), (32, 1), 0), reinterpret_tensor(buf252, (4, 32), (
32, 1), 0), buf253, buf257, buf258, buf259, reinterpret_tensor(
buf260, (4, 32), (32, 1), 0), reinterpret_tensor(buf263, (4, 32), (
32, 1), 0), buf264, buf266, reinterpret_tensor(buf267, (4, 32), (32,
1), 0), reinterpret_tensor(buf270, (4, 32), (32, 1), 0), buf271,
buf275, buf276, buf278, reinterpret_tensor(buf279, (4, 32), (32, 1),
0), reinterpret_tensor(buf282, (4, 32), (32, 1), 0), buf283, buf287,
buf288, buf293, buf294, buf295, reinterpret_tensor(buf296, (4, 32),
(32, 1), 0), reinterpret_tensor(buf299, (4, 32), (32, 1), 0),
buf300, buf302, reinterpret_tensor(buf303, (4, 32), (32, 1), 0),
reinterpret_tensor(buf306, (4, 32), (32, 1), 0), buf307, buf311,
buf312, buf314, reinterpret_tensor(buf315, (4, 32), (32, 1), 0),
reinterpret_tensor(buf318, (4, 32), (32, 1), 0), buf319, buf323,
buf324, buf325, reinterpret_tensor(buf326, (4, 32), (32, 1), 0),
reinterpret_tensor(buf329, (4, 32), (32, 1), 0), buf330, buf332,
reinterpret_tensor(buf333, (4, 32), (32, 1), 0), reinterpret_tensor
(buf336, (4, 32), (32, 1), 0), buf337, buf341, buf342, buf344,
reinterpret_tensor(buf345, (4, 32), (32, 1), 0), reinterpret_tensor
(buf348, (4, 32), (32, 1), 0), buf349, buf353, buf354, buf355,
reinterpret_tensor(buf356, (4, 32), (32, 1), 0), reinterpret_tensor
(buf359, (4, 32), (32, 1), 0), buf360, buf362, reinterpret_tensor(
buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (
32, 1), 0), buf367, buf371, buf372, buf374, reinterpret_tensor(
buf375, (4, 32), (32, 1), 0), reinterpret_tensor(buf378, (4, 32), (
32, 1), 0), buf379, buf383, buf384, buf385, reinterpret_tensor(
buf386, (4, 32), (32, 1), 0), reinterpret_tensor(buf389, (4, 32), (
32, 1), 0), buf390, buf392, reinterpret_tensor(buf393, (4, 32), (32,
1), 0), reinterpret_tensor(buf396, (4, 32), (32, 1), 0), buf397,
buf401, buf402, buf404, reinterpret_tensor(buf405, (4, 32), (32, 1),
0), reinterpret_tensor(buf408, (4, 32), (32, 1), 0), buf409, buf413,
buf414, buf419, buf420, buf421, reinterpret_tensor(buf422, (4, 32),
(32, 1), 0), reinterpret_tensor(buf425, (4, 32), (32, 1), 0),
buf426, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0),
reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437,
buf438, buf440, reinterpret_tensor(buf441, (4, 32), (32, 1), 0),
reinterpret_tensor(buf444, (4, 32), (32, 1), 0), buf445, buf449,
buf450, buf451, reinterpret_tensor(buf452, (4, 32), (32, 1), 0),
reinterpret_tensor(buf455, (4, 32), (32, 1), 0), buf456, buf458,
reinterpret_tensor(buf459, (4, 32), (32, 1), 0), reinterpret_tensor
(buf462, (4, 32), (32, 1), 0), buf463, buf467, buf468, buf470,
reinterpret_tensor(buf471, (4, 32), (32, 1), 0), reinterpret_tensor
(buf474, (4, 32), (32, 1), 0), buf475, buf479, buf480, buf481,
reinterpret_tensor(buf482, (4, 32), (32, 1), 0), reinterpret_tensor
(buf485, (4, 32), (32, 1), 0), buf486, buf488, reinterpret_tensor(
buf489, (4, 32), (32, 1), 0), reinterpret_tensor(buf492, (4, 32), (
32, 1), 0), buf493, buf497, buf498, buf500, reinterpret_tensor(
buf501, (4, 32), (32, 1), 0), reinterpret_tensor(buf504, (4, 32), (
32, 1), 0), buf505, buf509, buf510, buf511, reinterpret_tensor(
buf512, (4, 32), (32, 1), 0), reinterpret_tensor(buf515, (4, 32), (
32, 1), 0), buf516, buf518, reinterpret_tensor(buf519, (4, 32), (32,
1), 0), reinterpret_tensor(buf522, (4, 32), (32, 1), 0), buf523,
buf527, buf528, buf530, buf531, buf534, buf535)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
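# Editorial sketch (not from the source repo; helper name is illustrative):
# both factory helpers keep spatial size at stride 1 -- conv1x1 builds a
# weight-standardized StdConv2d, conv3x3 a plain nn.Conv2d.
def _conv_helpers_shape_check():
    x = torch.randn(1, 16, 8, 8)
    assert conv1x1(16, 32)(x).shape == (1, 32, 8, 8)
    assert conv3x3(16, 32)(x).shape == (1, 32, 8, 8)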
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
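# Editorial sketch: TensorFlow checkpoints store conv kernels as HWIO, so
# tf2th transposes 4-D arrays to PyTorch's OIHW layout; 1-D tensors such as
# GroupNorm gamma/beta pass through unchanged.
def _tf2th_layout_check():
    import numpy as np
    hwio = np.zeros((7, 7, 3, 64), dtype=np.float32)  # (H, W, in, out)
    assert tf2th(hwio).shape == (64, 3, 7, 7)         # (out, in, H, W)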
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
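# Editorial sketch of what StdConv2d.forward does before convolving: each
# output channel's filter is standardized to zero mean and unit variance over
# its (in_channels, kH, kW) elements, matching the var_mean/sqrt above.
def _weight_standardization_check():
    conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
    w = conv.weight
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    w_std = (w - m) / torch.sqrt(v + 1e-10)
    assert torch.allclose(w_std.mean(dim=[1, 2, 3]), torch.zeros(8), atol=1e-5)
    assert torch.allclose(w_std.var(dim=[1, 2, 3], unbiased=False), torch.ones(8), atol=1e-3)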
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
    Except it puts the stride on the 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[
f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[
f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[
f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma'])
)
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma'])
)
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma'])
)
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
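# Editorial sketch: with stride=1 and cin == cout the block uses the identity
# shortcut; otherwise the downsample projection matches shape, so stride=2
# halves the spatial dims while widening channels.
def _preact_bottleneck_shape_check():
    x = torch.randn(2, 256, 8, 8)
    assert PreActBottleneck(cin=256)(x).shape == (2, 256, 8, 8)
    assert PreActBottleneck(cin=256, cout=512, stride=2)(x).shape == (2, 512, 4, 4)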
class ResNetV2New(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843,
zero_head=False):
super().__init__()
wf = width_factor
self.wf = wf
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 *
wf, kernel_size=7, stride=2, padding=3, bias=False)), ('pad',
nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3,
stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 *
wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin=
256 * wf, cout=256 * wf, cmid=64 * wf)) for i in range(2,
block_units[0] + 1)]))), ('block2', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=256 * wf, cout=512 * wf, cmid=
128 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
512 * wf, cout=512 * wf, cmid=128 * wf)) for i in range(2,
block_units[1] + 1)]))), ('block3', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=512 * wf, cout=1024 * wf, cmid=
256 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin=
1024 * wf, cout=1024 * wf, cmid=256 * wf)) for i in range(2,
block_units[2] + 1)]))), ('block4', nn.Sequential(OrderedDict([
('unit01', PreActBottleneck(cin=1024 * wf, cout=2048 * wf, cmid
=512 * wf, stride=2))] + [(f'unit{i:02d}', PreActBottleneck(cin
=2048 * wf, cout=2048 * wf, cmid=512 * wf)) for i in range(2,
block_units[3] + 1)])))]))
self.zero_head = zero_head
self.head = nn.Sequential(OrderedDict([('gn', nn.GroupNorm(32, 2048 *
wf)), ('relu', nn.ReLU(inplace=True)), ('avg', nn.
AdaptiveAvgPool2d(output_size=1)), ('conv', nn.Conv2d(2048 * wf,
head_size, kernel_size=1, bias=True))]))
def load_from(self, weights, prefix='resnet/'):
with torch.no_grad():
self.root.conv.weight.copy_(tf2th(weights[
f'{prefix}root_block/standardized_conv2d/kernel']))
self.head.gn.weight.copy_(tf2th(weights[
f'{prefix}group_norm/gamma']))
self.head.gn.bias.copy_(tf2th(weights[f'{prefix}group_norm/beta']))
if self.zero_head:
nn.init.zeros_(self.head.conv.weight)
nn.init.zeros_(self.head.conv.bias)
else:
self.head.conv.weight.copy_(tf2th(weights[
f'{prefix}head/conv2d/kernel']))
self.head.conv.bias.copy_(tf2th(weights[
f'{prefix}head/conv2d/bias']))
for bname, block in self.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, prefix=f'{prefix}{bname}/{uname}/')
def forward(self, input_0):
primals_1 = self.root.conv.weight
primals_3 = self.body.block1.unit01.gn1.weight
primals_4 = self.body.block1.unit01.gn1.bias
primals_6 = self.body.block1.unit01.conv1.weight
primals_7 = self.body.block1.unit01.gn2.weight
primals_8 = self.body.block1.unit01.gn2.bias
primals_9 = self.body.block1.unit01.conv2.weight
primals_10 = self.body.block1.unit01.conv2.bias
primals_11 = self.body.block1.unit01.gn3.weight
primals_12 = self.body.block1.unit01.gn3.bias
primals_5 = self.body.block1.unit01.conv3.weight
primals_13 = self.body.block1.unit01.downsample.weight
primals_14 = self.body.block1.unit02.gn1.weight
primals_15 = self.body.block1.unit02.gn1.bias
primals_16 = self.body.block1.unit02.conv1.weight
primals_17 = self.body.block1.unit02.gn2.weight
primals_18 = self.body.block1.unit02.gn2.bias
primals_19 = self.body.block1.unit02.conv2.weight
primals_20 = self.body.block1.unit02.conv2.bias
primals_21 = self.body.block1.unit02.gn3.weight
primals_22 = self.body.block1.unit02.gn3.bias
primals_23 = self.body.block1.unit02.conv3.weight
primals_24 = self.body.block1.unit03.gn1.weight
primals_25 = self.body.block1.unit03.gn1.bias
primals_26 = self.body.block1.unit03.conv1.weight
primals_27 = self.body.block1.unit03.gn2.weight
primals_28 = self.body.block1.unit03.gn2.bias
primals_29 = self.body.block1.unit03.conv2.weight
primals_30 = self.body.block1.unit03.conv2.bias
primals_31 = self.body.block1.unit03.gn3.weight
primals_32 = self.body.block1.unit03.gn3.bias
primals_33 = self.body.block1.unit03.conv3.weight
primals_34 = self.body.block1.unit04.gn1.weight
primals_35 = self.body.block1.unit04.gn1.bias
primals_36 = self.body.block1.unit04.conv1.weight
primals_37 = self.body.block1.unit04.gn2.weight
primals_38 = self.body.block1.unit04.gn2.bias
primals_39 = self.body.block1.unit04.conv2.weight
primals_40 = self.body.block1.unit04.conv2.bias
primals_41 = self.body.block1.unit04.gn3.weight
primals_42 = self.body.block1.unit04.gn3.bias
primals_43 = self.body.block1.unit04.conv3.weight
primals_44 = self.body.block2.unit01.gn1.weight
primals_45 = self.body.block2.unit01.gn1.bias
primals_47 = self.body.block2.unit01.conv1.weight
primals_48 = self.body.block2.unit01.gn2.weight
primals_49 = self.body.block2.unit01.gn2.bias
primals_50 = self.body.block2.unit01.conv2.weight
primals_51 = self.body.block2.unit01.conv2.bias
primals_52 = self.body.block2.unit01.gn3.weight
primals_53 = self.body.block2.unit01.gn3.bias
primals_54 = self.body.block2.unit01.conv3.weight
primals_46 = self.body.block2.unit01.downsample.weight
primals_55 = self.body.block2.unit02.gn1.weight
primals_56 = self.body.block2.unit02.gn1.bias
primals_57 = self.body.block2.unit02.conv1.weight
primals_58 = self.body.block2.unit02.gn2.weight
primals_59 = self.body.block2.unit02.gn2.bias
primals_60 = self.body.block2.unit02.conv2.weight
primals_61 = self.body.block2.unit02.conv2.bias
primals_62 = self.body.block2.unit02.gn3.weight
primals_63 = self.body.block2.unit02.gn3.bias
primals_64 = self.body.block2.unit02.conv3.weight
primals_65 = self.body.block2.unit03.gn1.weight
primals_66 = self.body.block2.unit03.gn1.bias
primals_67 = self.body.block2.unit03.conv1.weight
primals_68 = self.body.block2.unit03.gn2.weight
primals_69 = self.body.block2.unit03.gn2.bias
primals_70 = self.body.block2.unit03.conv2.weight
primals_71 = self.body.block2.unit03.conv2.bias
primals_72 = self.body.block2.unit03.gn3.weight
primals_73 = self.body.block2.unit03.gn3.bias
primals_74 = self.body.block2.unit03.conv3.weight
primals_75 = self.body.block2.unit04.gn1.weight
primals_76 = self.body.block2.unit04.gn1.bias
primals_77 = self.body.block2.unit04.conv1.weight
primals_78 = self.body.block2.unit04.gn2.weight
primals_79 = self.body.block2.unit04.gn2.bias
primals_80 = self.body.block2.unit04.conv2.weight
primals_81 = self.body.block2.unit04.conv2.bias
primals_82 = self.body.block2.unit04.gn3.weight
primals_83 = self.body.block2.unit04.gn3.bias
primals_84 = self.body.block2.unit04.conv3.weight
primals_85 = self.body.block3.unit01.gn1.weight
primals_86 = self.body.block3.unit01.gn1.bias
primals_88 = self.body.block3.unit01.conv1.weight
primals_89 = self.body.block3.unit01.gn2.weight
primals_90 = self.body.block3.unit01.gn2.bias
primals_91 = self.body.block3.unit01.conv2.weight
primals_92 = self.body.block3.unit01.conv2.bias
primals_93 = self.body.block3.unit01.gn3.weight
primals_94 = self.body.block3.unit01.gn3.bias
primals_95 = self.body.block3.unit01.conv3.weight
primals_87 = self.body.block3.unit01.downsample.weight
primals_96 = self.body.block3.unit02.gn1.weight
primals_97 = self.body.block3.unit02.gn1.bias
primals_98 = self.body.block3.unit02.conv1.weight
primals_99 = self.body.block3.unit02.gn2.weight
primals_100 = self.body.block3.unit02.gn2.bias
primals_101 = self.body.block3.unit02.conv2.weight
primals_102 = self.body.block3.unit02.conv2.bias
primals_103 = self.body.block3.unit02.gn3.weight
primals_104 = self.body.block3.unit02.gn3.bias
primals_105 = self.body.block3.unit02.conv3.weight
primals_106 = self.body.block3.unit03.gn1.weight
primals_107 = self.body.block3.unit03.gn1.bias
primals_108 = self.body.block3.unit03.conv1.weight
primals_109 = self.body.block3.unit03.gn2.weight
primals_110 = self.body.block3.unit03.gn2.bias
primals_111 = self.body.block3.unit03.conv2.weight
primals_112 = self.body.block3.unit03.conv2.bias
primals_113 = self.body.block3.unit03.gn3.weight
primals_114 = self.body.block3.unit03.gn3.bias
primals_115 = self.body.block3.unit03.conv3.weight
primals_116 = self.body.block3.unit04.gn1.weight
primals_117 = self.body.block3.unit04.gn1.bias
primals_118 = self.body.block3.unit04.conv1.weight
primals_119 = self.body.block3.unit04.gn2.weight
primals_120 = self.body.block3.unit04.gn2.bias
primals_121 = self.body.block3.unit04.conv2.weight
primals_122 = self.body.block3.unit04.conv2.bias
primals_123 = self.body.block3.unit04.gn3.weight
primals_124 = self.body.block3.unit04.gn3.bias
primals_125 = self.body.block3.unit04.conv3.weight
primals_126 = self.body.block4.unit01.gn1.weight
primals_127 = self.body.block4.unit01.gn1.bias
primals_129 = self.body.block4.unit01.conv1.weight
primals_130 = self.body.block4.unit01.gn2.weight
primals_131 = self.body.block4.unit01.gn2.bias
primals_132 = self.body.block4.unit01.conv2.weight
primals_133 = self.body.block4.unit01.conv2.bias
primals_134 = self.body.block4.unit01.gn3.weight
primals_135 = self.body.block4.unit01.gn3.bias
primals_136 = self.body.block4.unit01.conv3.weight
primals_128 = self.body.block4.unit01.downsample.weight
primals_137 = self.body.block4.unit02.gn1.weight
primals_138 = self.body.block4.unit02.gn1.bias
primals_139 = self.body.block4.unit02.conv1.weight
primals_140 = self.body.block4.unit02.gn2.weight
primals_141 = self.body.block4.unit02.gn2.bias
primals_142 = self.body.block4.unit02.conv2.weight
primals_143 = self.body.block4.unit02.conv2.bias
primals_144 = self.body.block4.unit02.gn3.weight
primals_145 = self.body.block4.unit02.gn3.bias
primals_146 = self.body.block4.unit02.conv3.weight
primals_147 = self.body.block4.unit03.gn1.weight
primals_148 = self.body.block4.unit03.gn1.bias
primals_149 = self.body.block4.unit03.conv1.weight
primals_150 = self.body.block4.unit03.gn2.weight
primals_151 = self.body.block4.unit03.gn2.bias
primals_152 = self.body.block4.unit03.conv2.weight
primals_153 = self.body.block4.unit03.conv2.bias
primals_154 = self.body.block4.unit03.gn3.weight
primals_155 = self.body.block4.unit03.gn3.bias
primals_156 = self.body.block4.unit03.conv3.weight
primals_157 = self.body.block4.unit04.gn1.weight
primals_158 = self.body.block4.unit04.gn1.bias
primals_159 = self.body.block4.unit04.conv1.weight
primals_160 = self.body.block4.unit04.gn2.weight
primals_161 = self.body.block4.unit04.gn2.bias
primals_162 = self.body.block4.unit04.conv2.weight
primals_163 = self.body.block4.unit04.conv2.bias
primals_164 = self.body.block4.unit04.gn3.weight
primals_165 = self.body.block4.unit04.gn3.bias
primals_166 = self.body.block4.unit04.conv3.weight
primals_167 = self.head.gn.weight
primals_168 = self.head.gn.bias
primals_169 = self.head.conv.weight
primals_170 = self.head.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121, primals_122, primals_123, primals_124,
primals_125, primals_126, primals_127, primals_128, primals_129,
primals_130, primals_131, primals_132, primals_133, primals_134,
primals_135, primals_136, primals_137, primals_138, primals_139,
primals_140, primals_141, primals_142, primals_143, primals_144,
primals_145, primals_146, primals_147, primals_148, primals_149,
primals_150, primals_151, primals_152, primals_153, primals_154,
primals_155, primals_156, primals_157, primals_158, primals_159,
primals_160, primals_161, primals_162, primals_163, primals_164,
primals_165, primals_166, primals_167, primals_168, primals_169,
primals_170])
return output[0]
| matsuolab/DomainBed | ResNetV2 | false | 7,630 | [
"MIT"
] | 1 | 00e0e3d183b36fd4d0c50442012149794a6504c2 | https://github.com/matsuolab/DomainBed/tree/00e0e3d183b36fd4d0c50442012149794a6504c2 | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import torch.utils.data
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
def tf2th(conv_weights):
"""Possibly convert HWIO to OIHW."""
if conv_weights.ndim == 4:
conv_weights = conv_weights.transpose([3, 2, 0, 1])
return torch.from_numpy(conv_weights)
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-10)
return F.conv2d(x, w, self.bias, self.stride, self.padding, self.
dilation, self.groups)
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
Follows the implementation of "Identity Mappings in Deep Residual Networks":
https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua
    Except it puts the stride on the 3x3 conv when available.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cin)
self.conv1 = conv1x1(cin, cmid)
self.gn2 = nn.GroupNorm(32, cmid)
self.conv2 = conv3x3(cmid, cmid, stride)
self.gn3 = nn.GroupNorm(32, cmid)
self.conv3 = conv1x1(cmid, cout)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride)
def forward(self, x):
out = self.relu(self.gn1(x))
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(out)
out = self.conv1(out)
out = self.conv2(self.relu(self.gn2(out)))
out = self.conv3(self.relu(self.gn3(out)))
return out + residual
def load_from(self, weights, prefix=''):
convname = 'standardized_conv2d'
with torch.no_grad():
self.conv1.weight.copy_(tf2th(weights[
f'{prefix}a/{convname}/kernel']))
self.conv2.weight.copy_(tf2th(weights[
f'{prefix}b/{convname}/kernel']))
self.conv3.weight.copy_(tf2th(weights[
f'{prefix}c/{convname}/kernel']))
self.gn1.weight.copy_(tf2th(weights[f'{prefix}a/group_norm/gamma'])
)
self.gn2.weight.copy_(tf2th(weights[f'{prefix}b/group_norm/gamma'])
)
self.gn3.weight.copy_(tf2th(weights[f'{prefix}c/group_norm/gamma'])
)
self.gn1.bias.copy_(tf2th(weights[f'{prefix}a/group_norm/beta']))
self.gn2.bias.copy_(tf2th(weights[f'{prefix}b/group_norm/beta']))
self.gn3.bias.copy_(tf2th(weights[f'{prefix}c/group_norm/beta']))
if hasattr(self, 'downsample'):
w = weights[f'{prefix}a/proj/{convname}/kernel']
self.downsample.weight.copy_(tf2th(w))
class Model(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor, head_size=21843,
zero_head=False):
super().__init__()
wf = width_factor
self.wf = wf
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, 64 *
wf, kernel_size=7, stride=2, padding=3, bias=False)), ('pad',
nn.ConstantPad2d(1, 0)), ('pool', nn.MaxPool2d(kernel_size=3,
stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit01', PreActBottleneck(cin=64 * wf, cout=256 *
wf, cmid=64 * wf))] + [(f'unit{i:02d}', PreActBottleneck(cin=
256 * wf, cout=256 * wf, cmid=6
# ... truncated (>4000 chars) for memory efficiency |
HSwish | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/rw/crwdk3j4ojtagun7zhqwyf5bqe7salukzcept7ykzhjfr5nro3cm.py
# Topologically Sorted Source Nodes: [add, relu6, mul, out], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# mul => mul
# out => div
# relu6 => clamp_max, clamp_min
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %clamp_max), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 6), kwargs = {})
triton_poi_fused_add_div_hardtanh_mul_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, mul, out], Original ATen: [aten.add, aten.hardtanh, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
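# Editorial check (assumes a CUDA device; not emitted by the compiler): the
# fused kernel above computes x * relu6(x + 3) / 6, i.e. torch's F.hardswish,
# so it can be cross-checked against the eager op.
def _check_compiled_hswish():
    import torch.nn.functional as F
    x = torch.randn(4, 4, 4, 4, device='cuda')
    out, = call([x.clone()])
    torch.testing.assert_close(out, F.hardswish(x))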
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class HSwish(nn.Module):
def __init__(self, inplace=True):
super(HSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
out = x * F.relu6(x + 3, inplace=self.inplace) / 6
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
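# Editorial note: this formula is exactly torch's built-in hard-swish, so the
# module can be sanity-checked against F.hardswish.
def _hswish_matches_hardswish():
    x = torch.randn(4, 4, 4, 4)
    torch.testing.assert_close(HSwish()(x), F.hardswish(x))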
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = tmp0 * tmp6
tmp8 = 0.16666666666666666
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HSwishNew(nn.Module):
def __init__(self, inplace=True):
super(HSwishNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| AberHu/ImageNet-training | HSwish | false | 7,631 | [
"MIT"
] | 12 | 7201eb140176f4d7ec1ed0ff5c27deba2dfb60c2 | https://github.com/AberHu/ImageNet-training/tree/7201eb140176f4d7ec1ed0ff5c27deba2dfb60c2 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self, inplace=True):
super().__init__()
self.inplace = inplace
def forward(self, x):
out = x * F.relu6(x + 3, inplace=self.inplace) / 6
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Normalize | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/kz/ckz4kdygd3gfajopqwzvtecsmzyghdhlg3mc3xzmysjall7bayjn.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
# Source node to ATen node mapping:
# x => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
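# Editorial check (assumes a CUDA device): the fused kernel divides by the
# last-axis L2 norm clamped at 1e-12, which should agree with eager
# F.normalize(x, dim=-1).
def _check_compiled_normalize():
    import torch.nn.functional as F
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out, = call([x.clone()])
    torch.testing.assert_close(out, F.normalize(x, dim=-1))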
| import torch
import torch.utils.data
class Normalize(torch.nn.Module):
def __init__(self):
super(Normalize, self).__init__()
self.normalize = torch.nn.functional.normalize
def forward(self, x):
x = self.normalize(x, dim=-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
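# Editorial sketch: F.normalize(x, dim=-1) is x divided by its last-axis L2
# norm, clamped below by eps (1e-12 by default), as the manual form shows.
def _normalize_manual_check():
    x = torch.rand(4, 4, 4, 4)
    manual = x / x.norm(dim=-1, keepdim=True).clamp_min(1e-12)
    torch.testing.assert_close(Normalize()(x), manual)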
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-12
tmp14 = triton_helpers.maximum(tmp12, tmp13)
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NormalizeNew(torch.nn.Module):
def __init__(self):
super(NormalizeNew, self).__init__()
self.normalize = torch.nn.functional.normalize
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Alescontrela/AMP_for_hardware | Normalize | false | 7,632 | [
"BSD-3-Clause"
] | 11 | bfb0dbdcf32bdf83a916790bddf193fffc7e79b8 | https://github.com/Alescontrela/AMP_for_hardware/tree/bfb0dbdcf32bdf83a916790bddf193fffc7e79b8 | import torch
import torch.utils.data
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.normalize = torch.nn.functional.normalize
def forward(self, x):
x = self.normalize(x, dim=-1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
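# Editorial demo: outputs have (approximately) unit L2 norm along the last
# dimension.
def _unit_norm_demo():
    y = Model()(torch.rand(4, 4, 4, 4))
    assert torch.allclose(y.norm(dim=-1), torch.ones(4, 4, 4))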
|
ResizeTransform | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/hl/chlilsvuwckmguymgdsz3emilese4fxitlrwtijcidjhwlmxq66x.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1, add_1, clamp_max_1, clamp_min, clamp_min_1, convert_element_type, convert_element_type_1, iota, mul, mul_1, sub, sub_1
# x_1 => mul_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (1,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 0), kwargs = {})
# %clamp_min : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul, 0.0), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min, torch.int64), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %clamp_max]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%arg0_1, [None, None, %convert_element_type_1]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_1, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.25), kwargs = {})
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
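    # (Added comment) Linear resize to output length 1: with align_corners=True
    # the interpolation weight folds to 0, so only the first sample of each
    # length-4 row survives; the trailing 0.25 is the fused rescale factor
    # (1 / vel_resize).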
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.arange, aten._to_copy, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as nnf
import torch.utils
class ResizeTransform(nn.Module):
"""
Resize a transform, which involves resizing the vector field *and* rescaling it.
"""
def __init__(self, vel_resize, ndims):
super().__init__()
self.factor = 1.0 / vel_resize
self.mode = 'linear'
if ndims == 2:
self.mode = 'bi' + self.mode
elif ndims == 3:
self.mode = 'tri' + self.mode
def forward(self, x):
if self.factor < 1:
x = nnf.interpolate(x, align_corners=True, scale_factor=self.
factor, mode=self.mode)
x = self.factor * x
elif self.factor > 1:
x = self.factor * x
x = nnf.interpolate(x, align_corners=True, scale_factor=self.
factor, mode=self.mode)
return x
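# Illustrative note (added; not part of the original module): with vel_resize=4
# the factor is 0.25 < 1, so a (4, 4, 4) velocity field is first linearly
# resized to length 1 and then multiplied by 0.25, which is exactly the
# arithmetic fused into the Triton kernel above.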
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'vel_resize': 4, 'ndims': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0(in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 - tmp0
tmp3 = 0.0
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp6 = 0.25
tmp7 = tmp5 * tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy__unsafe_index_add_arange_clamp_mul_sub_0[grid
(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ResizeTransformNew(nn.Module):
"""
Resize a transform, which involves resizing the vector field *and* rescaling it.
"""
def __init__(self, vel_resize, ndims):
super().__init__()
self.factor = 1.0 / vel_resize
self.mode = 'linear'
if ndims == 2:
self.mode = 'bi' + self.mode
elif ndims == 3:
self.mode = 'tri' + self.mode
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Alison-brie/MultiPropReg | ResizeTransform | false | 7,633 | [
"MIT"
] | 14 | 526d843b161c0e2e53ec5c7c47de6964c6a44c60 | https://github.com/Alison-brie/MultiPropReg/tree/526d843b161c0e2e53ec5c7c47de6964c6a44c60 | import torch
import torch.nn as nn
import torch.nn.functional as nnf
import torch.utils
class Model(nn.Module):
"""
Resize a transform, which involves resizing the vector field *and* rescaling it.
"""
def __init__(self, vel_resize, ndims):
super().__init__()
self.factor = 1.0 / vel_resize
self.mode = 'linear'
if ndims == 2:
self.mode = 'bi' + self.mode
elif ndims == 3:
self.mode = 'tri' + self.mode
def forward(self, x):
if self.factor < 1:
x = nnf.interpolate(x, align_corners=True, scale_factor=self.
factor, mode=self.mode)
x = self.factor * x
elif self.factor > 1:
x = self.factor * x
x = nnf.interpolate(x, align_corners=True, scale_factor=self.
factor, mode=self.mode)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LinearBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/4g/c4guhk7x6skkidedvs2gxz2kcu6gb76l3ig5crjjvjtzvnjlhlte.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
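    # (Added comment) x0 selects the bias element for this output feature. The
    # kernel fuses the bias add and ReLU in place and also emits the
    # (out <= 0) mask consumed by the threshold_backward pass.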
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class LinearBlock(nn.Module):
def __init__(self, in_dim, out_dim, norm='none', activation='relu'):
super(LinearBlock, self).__init__()
use_bias = True
self.fc = nn.Linear(in_dim, out_dim, bias=use_bias)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=False)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=False)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
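# Hedged usage sketch (illustrative, not from the original repo):
#   block = LinearBlock(in_dim=4, out_dim=4)   # norm='none', activation='relu'
#   y = block(torch.rand(4, 4, 4, 4))          # nn.Linear maps the last dim, 4 -> 4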
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2
class LinearBlockNew(nn.Module):
def __init__(self, in_dim, out_dim, norm='none', activation='relu'):
super(LinearBlockNew, self).__init__()
use_bias = True
self.fc = nn.Linear(in_dim, out_dim, bias=use_bias)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=False)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=False)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Alikfp/research-GANwriting | LinearBlock | false | 7,634 | [
"MIT"
] | 41 | 2190954218a733deac52c929f51bb85bca5d7216 | https://github.com/Alikfp/research-GANwriting/tree/2190954218a733deac52c929f51bb85bca5d7216 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_dim, out_dim, norm='none', activation='relu'):
super().__init__()
use_bias = True
self.fc = nn.Linear(in_dim, out_dim, bias=use_bias)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm1d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm1d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=False)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=False)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
def forward(self, x):
out = self.fc(x)
if self.norm:
out = self.norm(out)
if self.activation:
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ResizeConv2d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/qc/cqczhltiazavubz4dgwwjcyogwua3vzjqvjlx6fo7w74pt5dijyi.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index]
# Source node to ATen node mapping:
# x => _unsafe_index
# Graph fragment:
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %unsqueeze, %convert_element_type_1]), kwargs = {})
triton_poi_fused__unsafe_index_0 = async_compile.triton('triton_poi_fused__unsafe_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x2 = (xindex // 16)
x4 = xindex
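    # (Added comment) Nearest-neighbour gather: output coordinates (x1, x0) are
    # scaled by 1.0 (scale_factor=1) and truncated to int, so the gather is an
    # identity copy of each (H, W) plane.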
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + (4*tmp4) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/hk/chki5zhaaqdgqyzgekg25xjj5k3ujrsadzt2dzuu6bz2zhzu25es.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten._unsafe_index]
stream0 = get_raw_stream(0)
triton_poi_fused__unsafe_index_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 3, 3), (36, 9, 3, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 144, grid=grid(144), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.cuda
import torch.optim
import torch.utils.data
class ResizeConv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, scale_factor,
mode='nearest'):
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=1, padding=1)
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
x = self.conv(x)
return x
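# Shape note (added for illustration): with scale_factor=1.0 the nearest
# interpolation is an identity copy, and the 4x4 kernel with padding=1 maps a
# 4x4 spatial input to 3x3, matching the (4, 4, 3, 3) buffer asserted in the
# compiled call().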
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'scale_factor': 1.0}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.cuda
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__unsafe_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x2 = xindex // 16
x4 = xindex
tmp0 = x1
tmp1 = tmp0.to(tl.float32)
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tmp5 = x0
tmp6 = tmp5.to(tl.float32)
tmp7 = tmp6 * tmp2
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.load(in_ptr0 + (tmp8 + 4 * tmp4 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__unsafe_index_0[grid(256)](primals_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 3, 3), (36, 9, 3, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(144)](buf2, primals_3, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class ResizeConv2dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, scale_factor,
mode='nearest'):
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=1, padding=1)
def forward(self, input_0):
        # Fixed argument order: call() interpolates primals_1 (the input) and
        # convolves it with primals_2 (the weight), so map them accordingly.
        primals_1 = input_0
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
output = call([primals_1, primals_2, primals_3])
return output[0]
| AhmadQasim/MedAL | ResizeConv2d | false | 7,635 | [
"MIT"
] | 13 | 0ad6064d0d07f23722034b866ba86d93b62517f4 | https://github.com/AhmadQasim/MedAL/tree/0ad6064d0d07f23722034b866ba86d93b62517f4 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.cuda
import torch.optim
import torch.utils.data
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, scale_factor,
mode='nearest'):
super().__init__()
self.scale_factor = scale_factor
self.mode = mode
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
stride=1, padding=1)
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4,
'scale_factor': 1.0}]
|
BalancedL1Loss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/od/codgjmibjthg5ee4tydbaiox7xzv2h2updcf2iabc7mzp45byskm.py
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, add, mul_1, mul_2, truediv, add_1, log, mul_3, mul_4, sub_1, mul_5, add_2, sub_2, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.add, aten.div, aten.log, aten.where, aten.mean]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# diff => abs_1
# log => log
# loss => where
# loss_1 => mean
# loss_bbox => mul_6
# lt => lt
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %abs_1 : [num_users=5] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 19.085536923187664), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.02619784824562798), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 19.085536923187664), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 1), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %log), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, 1.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, 0.07859354473688394), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %sub_1, %sub_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0 = async_compile.triton('triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
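    # (Added comment) Balanced L1 constants folded at trace time for alpha=0.5,
    # gamma=1.5, beta=1.0: b = e**(gamma/alpha) - 1 ~= 19.085536923187664,
    # alpha/b ~= 0.02619784824562798, gamma/b ~= 0.07859354473688394; the final
    # "- 0.5" in the else branch is alpha * beta.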
tmp6 = 19.085536923187664
tmp7 = tmp3 * tmp6
tmp8 = tmp7 + tmp4
tmp9 = 0.02619784824562798
tmp10 = tmp8 * tmp9
tmp11 = tmp7 * tmp4
tmp12 = tmp11 + tmp4
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = 0.5
tmp16 = tmp3 * tmp15
tmp17 = tmp14 - tmp16
tmp18 = 1.5
tmp19 = tmp3 * tmp18
tmp20 = 0.07859354473688394
tmp21 = tmp19 + tmp20
tmp22 = tmp21 - tmp15
tmp23 = tl.where(tmp5, tmp17, tmp22)
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp29, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, diff, lt, mul, add, mul_1, mul_2, truediv, add_1, log, mul_3, mul_4, sub_1, mul_5, add_2, sub_2, loss, loss_1, loss_bbox], Original ATen: [aten.sub, aten.abs, aten.lt, aten.mul, aten.add, aten.div, aten.log, aten.where, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
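def _balanced_l1_constants(alpha=0.5, gamma=1.5):
    """Illustrative helper (added; not part of the original repo).

    Reproduces the constants that inductor folded into the fused Triton
    kernel above: b = e**(gamma/alpha) - 1 ~= 19.085536923187664,
    alpha/b ~= 0.02619784824562798 and gamma/b ~= 0.07859354473688394.
    """
    b = np.e ** (gamma / alpha) - 1
    return b, alpha / b, gamma / b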
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None, **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(pred, target,
weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta,
reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss_bbox
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = 19.085536923187664
tmp7 = tmp3 * tmp6
tmp8 = tmp7 + tmp4
tmp9 = 0.02619784824562798
tmp10 = tmp8 * tmp9
tmp11 = tmp7 * tmp4
tmp12 = tmp11 + tmp4
tmp13 = tl_math.log(tmp12)
tmp14 = tmp10 * tmp13
tmp15 = 0.5
tmp16 = tmp3 * tmp15
tmp17 = tmp14 - tmp16
tmp18 = 1.5
tmp19 = tmp3 * tmp18
tmp20 = 0.07859354473688394
tmp21 = tmp19 + tmp20
tmp22 = tmp21 - tmp15
tmp23 = tl.where(tmp5, tmp17, tmp22)
tmp24 = tl.broadcast_to(tmp23, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp26 / tmp27
tmp29 = tmp28 * tmp4
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_div_log_lt_mean_mul_sub_where_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
class BalancedL1LossNew(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super(BalancedL1LossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| AllenPeng0209/SaccadeNet | BalancedL1Loss | false | 7,636 | [
"Apache-2.0"
] | 30 | 0fce4266cbffc9a2c5f70335efa636da849ce70c | https://github.com/AllenPeng0209/SaccadeNet/tree/0fce4266cbffc9a2c5f70335efa636da849ce70c | import functools
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
loss = loss * weight
if avg_factor is None:
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
loss = loss.sum() / avg_factor
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', avg_factor=
None, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
@weighted_loss
def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5,
reduction='mean'):
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e ** (gamma / alpha) - 1
loss = torch.where(diff < beta, alpha / b * (b * diff + 1) * torch.log(
b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b -
alpha * beta)
return loss
class Model(nn.Module):
"""Balanced L1 Loss
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
"""
def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean',
loss_weight=1.0):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None,
reduction_override=None, **kwargs):
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override if reduction_override else self.
reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(pred, target,
weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta,
reduction=reduction, avg_factor=avg_factor, **kwargs)
return loss_bbox
def get_inputs():
return [torch.ra
# ... truncated (>4000 chars) for memory efficiency |
Conv2dBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/ho/cho72zpf3yxrxsxjoqgiu5dmii3lj4efdjr3rk7fhvnzcxosbxxn.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
from torch import nn
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var,
            self.weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Conv2dBlock(nn.Module):
def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none',
        activation='relu', pad_type='zero', use_bias=True,
        activation_first=False):
super(Conv2dBlock, self).__init__()
self.use_bias = use_bias
self.activation_first = activation_first
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=False)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=False)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
def forward(self, x):
if self.activation_first:
if self.activation:
x = self.activation(x)
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
else:
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'ks': 4, 'st': 4}]
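# Hedged usage sketch (not part of the original source): exercising the eager
# Conv2dBlock with the shapes from get_inputs()/get_init_inputs(). With ks=4,
# st=4 and no padding, the 4x4 input collapses to a single spatial location.
if __name__ == '__main__':
    _block = Conv2dBlock(in_dim=4, out_dim=4, ks=4, st=4)
    _out = _block(*get_inputs())
    assert _out.shape == (4, 4, 1, 1)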
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1,
primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf1, primals_1, primals_2, buf2
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var,
            self.weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Conv2dBlockNew(nn.Module):
def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none',
        activation='relu', pad_type='zero', use_bias=True,
        activation_first=False):
super(Conv2dBlockNew, self).__init__()
self.use_bias = use_bias
self.activation_first = activation_first
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=False)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=False)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
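# Hedged sketch (assumption: a CUDA device is available). Conv2dBlockNew only
# covers the default relu/no-norm configuration that was baked into the fused
# kernel above; it routes self.conv.weight/bias plus the input through call().
if __name__ == '__main__' and torch.cuda.is_available():
    _block = Conv2dBlockNew(in_dim=4, out_dim=4, ks=4, st=4).cuda()
    _out = _block(torch.rand([4, 4, 4, 4], device='cuda'))
    assert _out.shape == (4, 4, 1, 1)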
| Alikfp/research-GANwriting | Conv2dBlock | false | 7,637 | [
"MIT"
] | 41 | 2190954218a733deac52c929f51bb85bca5d7216 | https://github.com/Alikfp/research-GANwriting/tree/2190954218a733deac52c929f51bb85bca5d7216 | import torch
import torch.nn.functional as F
from torch import nn
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super().__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign AdaIN weight first'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var,
            self.weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class Model(nn.Module):
def __init__(self, in_dim, out_dim, ks, st, padding=0, norm='none',
        activation='relu', pad_type='zero', use_bias=True,
        activation_first=False):
super().__init__()
self.use_bias = use_bias
self.activation_first = activation_first
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
norm_dim = out_dim
if norm == 'bn':
self.norm = nn.BatchNorm2d(norm_dim)
elif norm == 'in':
self.norm = nn.InstanceNorm2d(norm_dim)
elif norm == 'adain':
self.norm = AdaptiveInstanceNorm2d(norm_dim)
elif norm == 'none':
self.norm = None
else:
assert 0, 'Unsupported normalization: {}'.format(norm)
if activation == 'relu':
self.activation = nn.ReLU(inplace=False)
elif activation == 'lrelu':
self.activation = nn.LeakyReLU(0.2, inplace=False)
elif activation == 'tanh':
self.activation = nn.Tanh()
elif activation == 'none':
self.activation = None
else:
assert 0, 'Unsupported activation: {}'.format(activation)
self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
def forward(self, x):
if self.activation_first:
if self.activation:
x = self.activation(x)
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
else:
x = self.conv(self.pad(x))
if self.norm:
x = self.norm(x)
if self.activation:
x = self.activation(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 4]
|
WeightedCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/go/cgobu5sa2tyxtk6mrlov2ofgra2mvlz7hlazhico2xqgi24yq2dl.py
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# cross_entropy => amax, clone, sub
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/vh/cvhukx6jny3lzngngzgomrj7cky7jbseunfw5z3n4zit7ppv5ftk.py
# Topologically Sorted Source Nodes: [target, cross_entropy], Original ATen: [aten.argmax, aten.nll_loss2d_forward]
# Source node to ATen node mapping:
# cross_entropy => full_default_1, ne_1, neg, where_1
# target => argmax
# Graph fragment:
# %argmax : [num_users=1] = call_function[target=torch.ops.aten.argmax.default](args = (%arg1_1, -1), kwargs = {})
# %ne_1 : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%view_1, -100), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%ne_1, %neg, %full_default_1), kwargs = {})
triton_poi_fused_argmax_nll_loss2d_forward_1 = async_compile.triton('triton_poi_fused_argmax_nll_loss2d_forward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_argmax_nll_loss2d_forward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_argmax_nll_loss2d_forward_1(in_ptr0, in_ptr1, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp56 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp61 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp64 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tmp45 = tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert(((0 <= tmp53) & (tmp53 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp53 < 4")
tmp55 = tl.load(in_ptr1 + (tmp53 + (4*x0)), xmask, eviction_policy='evict_last')
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tl.store(out_ptr1 + (x0), tmp71, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_1/inductor_cache/im/cimj3zcmmwkch43geltms4rxmgsxa52vmgqjukcerfaf5c3rob2a.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# loss => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %arg2_1), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [cross_entropy], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg0_1, buf1, 64, grid=grid(64), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [target, cross_entropy], Original ATen: [aten.argmax, aten.nll_loss2d_forward]
triton_poi_fused_argmax_nll_loss2d_forward_1.run(arg1_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf2, arg2_1, buf3, 64, grid=grid(64), stream=stream0)
del arg2_1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class WeightedCrossEntropyLoss(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy
    loss, with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {}]
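# Hedged worked example (assumption: weights shaped (B, #anchors) as stated in
# the docstring, rather than the (4, 4, 4) tensors from get_inputs()). The
# loss equals -log_softmax gathered at argmax(target), scaled anchor-wise.
if __name__ == '__main__':
    _logits, _target = torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
    _w = torch.rand([4, 4])
    _loss = WeightedCrossEntropyLoss()(_logits, _target, _w)
    _logp = torch.log_softmax(_logits, dim=-1)
    _ref = -_logp.gather(-1, _target.argmax(-1, keepdim=True)).squeeze(-1) * _w
    assert torch.allclose(_loss, _ref, atol=1e-06)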
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
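    # Numerically stable log-softmax, step one: subtract the per-row max of
    # the four class logits; the exp/log-sum-exp step runs in the next kernel.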
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_argmax_nll_loss2d_forward_1(in_ptr0, in_ptr1, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp32 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp56 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp58 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp61 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp64 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
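    # What follows is Inductor's inlined, NaN-aware argmax over the four class
    # logits: a pairwise max that treats NaN as the largest value and breaks
    # ties toward the smaller index, followed by the ignore_index (-100) guard
    # and a log-sum-exp that turns the max-shifted logits in in_ptr1 into the
    # negative log-likelihood.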
tmp2 = tmp0 > tmp1
tmp3 = tmp0 == tmp1
tmp4 = tmp0 != tmp0
tmp5 = tmp1 != tmp1
tmp6 = tmp4 > tmp5
tmp7 = tmp2 | tmp6
tmp8 = tmp4 & tmp5
tmp9 = tmp3 | tmp8
tmp10 = tl.full([1], 0, tl.int64)
tmp11 = tl.full([1], 1, tl.int64)
tmp12 = tmp10 < tmp11
tmp13 = tmp9 & tmp12
tmp14 = tmp7 | tmp13
tmp15 = tl.where(tmp14, tmp0, tmp1)
tmp16 = tl.where(tmp14, tmp10, tmp11)
tmp18 = tmp15 > tmp17
tmp19 = tmp15 == tmp17
tmp20 = tmp15 != tmp15
tmp21 = tmp17 != tmp17
tmp22 = tmp20 > tmp21
tmp23 = tmp18 | tmp22
tmp24 = tmp20 & tmp21
tmp25 = tmp19 | tmp24
tmp26 = tl.full([1], 2, tl.int64)
tmp27 = tmp16 < tmp26
tmp28 = tmp25 & tmp27
tmp29 = tmp23 | tmp28
tmp30 = tl.where(tmp29, tmp15, tmp17)
tmp31 = tl.where(tmp29, tmp16, tmp26)
tmp33 = tmp30 > tmp32
tmp34 = tmp30 == tmp32
tmp35 = tmp30 != tmp30
tmp36 = tmp32 != tmp32
tmp37 = tmp35 > tmp36
tmp38 = tmp33 | tmp37
tmp39 = tmp35 & tmp36
tmp40 = tmp34 | tmp39
tmp41 = tl.full([1], 3, tl.int64)
tmp42 = tmp31 < tmp41
tmp43 = tmp40 & tmp42
tmp44 = tmp38 | tmp43
tl.where(tmp44, tmp30, tmp32)
tmp46 = tl.where(tmp44, tmp31, tmp41)
tmp47 = tl.full([1], -100, tl.int64)
tmp48 = tmp46 != tmp47
tmp49 = tl.where(tmp48, tmp46, tmp10)
tmp50 = tl.full([XBLOCK], 4, tl.int32)
tmp51 = tmp49 + tmp50
tmp52 = tmp49 < 0
tmp53 = tl.where(tmp52, tmp51, tmp49)
tl.device_assert((0 <= tmp53) & (tmp53 < 4) | ~xmask,
'index out of bounds: 0 <= tmp53 < 4')
tmp55 = tl.load(in_ptr1 + (tmp53 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp57 = tl_math.exp(tmp56)
tmp59 = tl_math.exp(tmp58)
tmp60 = tmp57 + tmp59
tmp62 = tl_math.exp(tmp61)
tmp63 = tmp60 + tmp62
tmp65 = tl_math.exp(tmp64)
tmp66 = tmp63 + tmp65
tmp67 = tl_math.log(tmp66)
tmp68 = tmp55 - tmp67
tmp69 = -tmp68
tmp70 = 0.0
tmp71 = tl.where(tmp48, tmp69, tmp70)
tl.store(out_ptr1 + x0, tmp71, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(64)](arg0_1, buf1, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 1, 4), (4, 16, 1), torch.float32)
triton_poi_fused_argmax_nll_loss2d_forward_1[grid(16)](arg1_1, buf1,
buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_mul_2[grid(64)](buf2, arg2_1, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg2_1
del buf2
return buf3,
class WeightedCrossEntropyLossNew(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy
    loss, with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLossNew, self).__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| AbangLZU/OpenPCDet | WeightedCrossEntropyLoss | false | 7,638 | [
"Apache-2.0"
] | 29 | eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | https://github.com/AbangLZU/OpenPCDet/tree/eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""
    Transform input to fit the format of the official PyTorch cross entropy
    loss, with anchor-wise weighting.
"""
def __init__(self):
super().__init__()
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
                Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return []
|
GlobalAvgPool2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/e7/ce73hyb6fl47lsvuo6oc4nyc7nbjn2cooo36plrte4gsotp7fcxm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
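    # Each program averages one full 4x4 window: the 16 loads above are summed
    # below and scaled by 1/16 (the 0.0625 constant).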
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
class GlobalAvgPool2d(nn.Module):
def __init__(self):
super(GlobalAvgPool2d, self).__init__()
def forward(self, x):
N = x.data.size(0)
C = x.data.size(1)
H = x.data.size(2)
W = x.data.size(3)
x = F.avg_pool2d(x, (H, W))
x = x.view(N, C)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
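# Hedged equivalence check (not part of the original source): average pooling
# over the full HxW window is just a spatial mean.
if __name__ == '__main__':
    _x = torch.rand([4, 4, 4, 4])
    assert torch.allclose(GlobalAvgPool2d()(_x), _x.mean(dim=(2, 3)), atol=1e-06)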
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (4, 4), (4, 1), 0),
class GlobalAvgPool2dNew(nn.Module):
def __init__(self):
super(GlobalAvgPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Alin1102/Yolov3_Dartnet2Caffe | GlobalAvgPool2d | false | 7,639 | [
"MIT"
] | 21 | b4284b080f53c1ac73c1930b1b1c4e07dcd97559 | https://github.com/Alin1102/Yolov3_Dartnet2Caffe/tree/b4284b080f53c1ac73c1930b1b1c4e07dcd97559 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
N = x.data.size(0)
C = x.data.size(1)
H = x.data.size(2)
W = x.data.size(3)
x = F.avg_pool2d(x, (H, W))
x = x.view(N, C)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Eltwise | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/vy/cvy3acxplr46otit37ckl3gn66usbt2zfvuqpsbrp6vt24b43v3p.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Eltwise(nn.Module):
def __init__(self, operation='+'):
super(Eltwise, self).__init__()
self.operation = operation
def forward(self, x1, x2):
if self.operation == '+' or self.operation == 'SUM':
x = x1 + x2
        elif self.operation == '*' or self.operation == 'MUL':
            x = x1 * x2
        elif self.operation == '/' or self.operation == 'DIV':
            x = x1 / x2
        else:
            assert 0, 'Unsupported operation: {}'.format(self.operation)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
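# Hedged usage sketch (not part of the original source): the three supported
# element-wise modes and their long-form aliases.
if __name__ == '__main__':
    _a, _b = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]) + 1.0
    assert torch.equal(Eltwise('+')(_a, _b), _a + _b)
    assert torch.equal(Eltwise('MUL')(_a, _b), _a * _b)
    assert torch.equal(Eltwise('/')(_a, _b), _a / _b)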
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class EltwiseNew(nn.Module):
def __init__(self, operation='+'):
super(EltwiseNew, self).__init__()
self.operation = operation
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Alin1102/Yolov3_Dartnet2Caffe | Eltwise | false | 7,640 | [
"MIT"
] | 21 | b4284b080f53c1ac73c1930b1b1c4e07dcd97559 | https://github.com/Alin1102/Yolov3_Dartnet2Caffe/tree/b4284b080f53c1ac73c1930b1b1c4e07dcd97559 | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Model(nn.Module):
def __init__(self, operation='+'):
super().__init__()
self.operation = operation
def forward(self, x1, x2):
if self.operation == '+' or self.operation == 'SUM':
x = x1 + x2
        elif self.operation == '*' or self.operation == 'MUL':
            x = x1 * x2
        elif self.operation == '/' or self.operation == 'DIV':
            x = x1 / x2
        else:
            assert 0, 'Unsupported operation: {}'.format(self.operation)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
MaxPoolStride1 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/ni/cni2lg4zkcb4yfdnvxs7bviwpfhpi26muesyqf6m6e632uqgcejf.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
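    # The expression 3*(3 <= i) + i*(i < 3) clamps an index to min(i, 3),
    # which reproduces the one-pixel replicate padding on the right/bottom
    # edge, so every 2x2 max below reads in-bounds neighbours only.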
tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (x1)) + (x1) * ((x1) < (3)))) + (16*x2) + ((3) * ((3) <= (x0)) + (x0) * ((x0) < (3)))), xmask)
tmp1 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (x1)) + (x1) * ((x1) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + x0)) + (1 + x0) * ((1 + x0) < (3)))), xmask)
tmp3 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + x1)) + (1 + x1) * ((1 + x1) < (3)))) + (16*x2) + ((3) * ((3) <= (x0)) + (x0) * ((x0) < (3)))), xmask)
tmp5 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + x1)) + (1 + x1) * ((1 + x1) < (3)))) + (16*x2) + ((3) * ((3) <= (1 + x0)) + (1 + x0) * ((1 + x0) < (3)))), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
class MaxPoolStride1(nn.Module):
def __init__(self):
super(MaxPoolStride1, self).__init__()
def forward(self, x):
x = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
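# Hedged shape check (not part of the original source): a 2x2 max pool at
# stride 1 after one row/column of replicate padding preserves spatial size.
if __name__ == '__main__':
    _x = torch.rand([4, 4, 4, 4])
    assert MaxPoolStride1()(_x).shape == _x.shape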
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * x2 +
(3 * (3 <= x0) + x0 * (x0 < 3))), xmask)
tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= x1) + x1 * (x1 < 3)) + 16 * x2 +
(3 * (3 <= 1 + x0) + (1 + x0) * (1 + x0 < 3))), xmask)
tmp3 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + x1) + (1 + x1) * (1 + x1 <
3)) + 16 * x2 + (3 * (3 <= x0) + x0 * (x0 < 3))), xmask)
tmp5 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + x1) + (1 + x1) * (1 + x1 <
3)) + 16 * x2 + (3 * (3 <= 1 + x0) + (1 + x0) * (1 + x0 < 3))), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MaxPoolStride1New(nn.Module):
def __init__(self):
super(MaxPoolStride1New, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Alin1102/Yolov3_Dartnet2Caffe | MaxPoolStride1 | false | 7,641 | [
"MIT"
] | 21 | b4284b080f53c1ac73c1930b1b1c4e07dcd97559 | https://github.com/Alin1102/Yolov3_Dartnet2Caffe/tree/b4284b080f53c1ac73c1930b1b1c4e07dcd97559 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
class Model(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
x = F.max_pool2d(F.pad(x, (0, 1, 0, 1), mode='replicate'), 2, stride=1)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SigmoidFocalClassificationLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/3m/c3mhxseecpkqbqvdyzfib5liiej2nz7jgrbqydwfjbhv3nwjb7th.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha_weight, pred_sigmoid, sub_1, mul_2, sub_2, mul_3, pt, pow_1, focal_weight, clamp, mul_5, sub_3, abs_1, neg, exp, log1p, loss, loss_1, mul_7], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.clamp, aten.sub, aten.abs, aten.neg, aten.exp, aten.log1p]
# Source node to ATen node mapping:
# abs_1 => abs_1
# alpha_weight => add
# clamp => clamp_min
# exp => exp
# focal_weight => mul_4
# log1p => log1p
# loss => add_2
# loss_1 => mul_6
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_5 => mul_5
# mul_7 => mul_7
# neg => neg
# pow_1 => pow_1
# pred_sigmoid => sigmoid
# pt => add_1
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 0.25), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.75), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %sub_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %sigmoid), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_1, 2.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %mul_5), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_3, %log1p), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_4, %add_2), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_6, %arg2_1), kwargs = {})
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp26 = tl.load(in_ptr2 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = 0.75
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp3 - tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp4 * tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp7 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp18 = tmp8 * tmp0
tmp19 = tmp17 - tmp18
tmp20 = tl_math.abs(tmp8)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 + tmp23
tmp25 = tmp15 * tmp24
tmp27 = tmp25 * tmp26
tl.store(out_ptr0 + (x0), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sub, mul_1, alpha_weight, pred_sigmoid, sub_1, mul_2, sub_2, mul_3, pt, pow_1, focal_weight, clamp, mul_5, sub_3, abs_1, neg, exp, log1p, loss, loss_1, mul_7], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sigmoid, aten.pow, aten.clamp, aten.sub, aten.abs, aten.neg, aten.exp, aten.log1p]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target:
'torch.Tensor'):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch
.exp(-torch.abs(input)))
return loss
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or weights.shape.__len__(
) == 1 and target.shape.__len__() == 2:
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0(
in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp26 = tl.load(in_ptr2 + x0, xmask)
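    # Argument order (see `call` below): in_ptr0 holds the one-hot targets,
    # in_ptr1 the raw logits, and in_ptr2 the anchor-wise weights.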
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = 0.75
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tl.sigmoid(tmp8)
tmp10 = tmp3 - tmp9
tmp11 = tmp0 * tmp10
tmp12 = tmp4 * tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp13 * tmp13
tmp15 = tmp7 * tmp14
tmp16 = 0.0
tmp17 = triton_helpers.maximum(tmp8, tmp16)
tmp18 = tmp8 * tmp0
tmp19 = tmp17 - tmp18
tmp20 = tl_math.abs(tmp8)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 + tmp23
tmp25 = tmp15 * tmp24
tmp27 = tmp25 * tmp26
tl.store(out_ptr0 + x0, tmp27, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_abs_add_clamp_exp_log1p_mul_neg_pow_rsub_sigmoid_sub_0[
grid(256)](arg1_1, arg0_1, arg2_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class SigmoidFocalClassificationLossNew(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLossNew, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target:
'torch.Tensor'):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch
.exp(-torch.abs(input)))
return loss
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
 | AbangLZU/OpenPCDet | SigmoidFocalClassificationLoss | false | 7642 | ["Apache-2.0"] | 29 | eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | https://github.com/AbangLZU/OpenPCDet/tree/eeea3f24d392f692228c1ad4e28c0dc9d0e25665 | import torch
import torch.nn as nn
class Model(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: 'float'=2.0, alpha: 'float'=0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super().__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: 'torch.Tensor', target:
'torch.Tensor'):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + torch.log1p(torch
.exp(-torch.abs(input)))
return loss
def forward(self, input: 'torch.Tensor', target: 'torch.Tensor',
weights: 'torch.Tensor'):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or weights.shape.__len__(
) == 1 and target.shape.__len__() == 2:
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return []
|
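The stable form used in `sigmoid_cross_entropy_with_logits` above is the standard binary-cross-entropy-with-logits identity, so it can be cross-checked against the built-in op; a minimal CPU-only sketch:
import torch
import torch.nn.functional as F
x = torch.randn(8)                     # logits
z = torch.randint(0, 2, (8,)).float()  # {0, 1} targets
# max(x, 0) - x*z + log(1 + exp(-|x|)), exactly as in the docstring above
stable = torch.clamp(x, min=0) - x * z + torch.log1p(torch.exp(-torch.abs(x)))
torch.testing.assert_close(
    stable, F.binary_cross_entropy_with_logits(x, z, reduction='none'))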
L2Norm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_1/inductor_cache/5t/c5tspqtzeidbs36gvaxcfbh25f4xhztbl4xcft3spdsrzuwzcvde.py
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, mul, truediv], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.mul, aten.div]
# Source node to ATen node mapping:
# mul => mul
# norm => add
# pow_1 => pow_1
# sqrt => sqrt
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-10), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expand, %primals_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 4
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp3 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 1e-10
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, mul, truediv], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20.0, eps=1e-10):
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
norm = x.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return self.weight[None, :, None, None].expand_as(x) * x / norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_dims': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 4
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
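    # x1 selects the per-channel weight; the four loads below gather the same
    # spatial position across all 4 channels (channel stride 16), so the sum
    # of squares forms the L2 norm over dim 1.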
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp3 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = libdevice.sqrt(tmp13)
tmp15 = 1e-10
tmp16 = tmp14 + tmp15
tmp17 = tmp2 / tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class L2NormNew(nn.Module):
def __init__(self, n_dims, scale=20.0, eps=1e-10):
super(L2NormNew, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
 | AllenPeng0209/SaccadeNet | L2Norm | false | 7643 | ["Apache-2.0"] | 30 | 0fce4266cbffc9a2c5f70335efa636da849ce70c | https://github.com/AllenPeng0209/SaccadeNet/tree/0fce4266cbffc9a2c5f70335efa636da849ce70c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, n_dims, scale=20.0, eps=1e-10):
super().__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
norm = x.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return self.weight[None, :, None, None].expand_as(x) * x / norm
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
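Usage note for the L2Norm record above, a minimal sketch assuming a CUDA device: the constructor leaves `weight` uninitialized (the stored `scale` attribute is never applied in `forward`), so it must be initialized before use; constant init to `scale` is an assumption here, not something the record prescribes. `Model` and `call` refer to the definitions directly above.
import torch
import torch.nn as nn
m = Model(4).cuda()                   # eager L2Norm variant from this record
nn.init.constant_(m.weight, m.scale)  # weight starts as uninitialized memory
x = torch.rand(4, 4, 4, 4, device='cuda')
out, _ = call([x, m.weight.detach()])  # compiled path returns (buf0, primals_1)
torch.testing.assert_close(out, m(x))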