Dataset columns (each record below lists these fields in this order, separated by `|`):

| column | dtype | size / range |
|---|---|---|
| entry_point | string | 1 – 65 chars |
| original_triton_code | string | 4.5k – 619k chars |
| python_code | string | 208 – 60.9k chars |
| triton_code | string | 1.15k – 275k chars |
| repo_name | string | 7 – 115 chars |
| module_name | string | 1 – 65 chars |
| synthetic | bool | 1 class |
| uuid | int64 | 0 – 18.5k |
| licenses | sequence | 1 – 6 entries |
| stars | int64 | 0 – 19.8k |
| sha | string | 40 chars |
| repo_link | string | 72 – 180 chars |
| pytorch_code | string | 200 – 4.05k chars |
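A minimal access sketch, assuming the records are loaded with the `datasets` library; the dataset id below is a placeholder, and only the column names come from the schema table above:

```python
from datasets import load_dataset

# Placeholder id: substitute the actual dataset repository for this card.
ds = load_dataset("<dataset-id>", split="train")

row = ds[0]
# Column names match the schema table above.
print(row["entry_point"], row["module_name"], row["repo_name"], row["stars"])
print(row["pytorch_code"][:200])   # original PyTorch module source
print(row["triton_code"][:200])    # cleaned Triton/Inductor output
```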
ScaledDotProductAttention | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nq/cnqgpozfupmeoly3clr6c5kuqetg544kqc2babbdai3pypst4tge.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 4), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attn_2 => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [attn_2, output], Original ATen: [aten._softmax, aten.bmm]
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'temperature': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.25
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(arg1_1, reinterpret_tensor(arg0_1, (4, 4, 4), (
16, 1, 4), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = buf1
del buf1
extern_kernels.bmm(buf2, arg2_1, out=buf3)
del arg2_1
del buf2
return buf3,
class ScaledDotProductAttentionNew(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super(ScaledDotProductAttentionNew, self).__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| Xlinford/TDNet | ScaledDotProductAttention | false | 2,968 | [
"MIT"
] | 0 | e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | https://github.com/Xlinford/TDNet/tree/e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | import torch
import numpy as np
import torch.nn as nn
class Model(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [4]
|
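The pair of `triton_poi_fused__softmax_*` kernels in the record above split the scaled softmax into an exp pass and a normalization pass. The following is a minimal eager-mode sketch of the identity they rely on; the function and variable names are illustrative, not part of the record:

```python
import torch

# softmax(scores / T) along dim 2, written the way the two fused kernels do it:
#   pass 1 (kernel 0): exp((scores - rowmax(scores)) / T)
#   pass 2 (kernel 1): divide by the row sum of pass 1
def two_pass_softmax(scores, temperature=4.0):
    shifted = scores - scores.amax(dim=2, keepdim=True)
    numer = torch.exp(shifted / temperature)
    return numer / numer.sum(dim=2, keepdim=True)

q, k, v = (torch.rand(4, 4, 4) for _ in range(3))
scores = torch.bmm(q, k.transpose(1, 2))

# Reference path: the eager module with dropout inactive (inference graph).
reference = torch.bmm(torch.softmax(scores / 4.0, dim=2), v)
fused_style = torch.bmm(two_pass_softmax(scores), v)
assert torch.allclose(reference, fused_style, atol=1e-6)
```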
ConvCompress | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# compressed_mem => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# compressed_mem => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [4], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
del buf0
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [compressed_mem], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ConvCompress(nn.Module):
def __init__(self, d_model, ratio=4, groups=1):
super().__init__()
self.conv = nn.Conv1d(d_model, d_model, ratio, stride=ratio, groups
=groups)
def forward(self, mem):
mem = mem.transpose(1, 2)
compressed_mem = self.conv(mem)
return compressed_mem.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(4,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1), (4, 1, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 1, 4), (4, 1, 1), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class ConvCompressNew(nn.Module):
def __init__(self, d_model, ratio=4, groups=1):
super().__init__()
self.conv = nn.Conv1d(d_model, d_model, ratio, stride=ratio, groups
=groups)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_3 = self.conv.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Xinxinatg/DM-Count | ConvCompress | false | 2,969 | [
"MIT"
] | 0 | 9ac3327e26c0ede219bd44cb5a4ae6db9fded045 | https://github.com/Xinxinatg/DM-Count/tree/9ac3327e26c0ede219bd44cb5a4ae6db9fded045 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, d_model, ratio=4, groups=1):
super().__init__()
self.conv = nn.Conv1d(d_model, d_model, ratio, stride=ratio, groups
=groups)
def forward(self, mem):
mem = mem.transpose(1, 2)
compressed_mem = self.conv(mem)
return compressed_mem.transpose(1, 2)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
|
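The ConvCompress record above is a strided Conv1d wrapped in two transposes, so a (batch, seq, d_model) input is compressed along the sequence dimension by `ratio`. A minimal eager-mode shape sketch, matching the record's sizes (d_model = 4, ratio = 4, 4×4×4 input); the variable names are illustrative:

```python
import torch
from torch import nn

d_model, ratio = 4, 4
conv = nn.Conv1d(d_model, d_model, kernel_size=ratio, stride=ratio)

mem = torch.rand(4, 4, d_model)                  # (batch, seq_len, d_model)
out = conv(mem.transpose(1, 2)).transpose(1, 2)  # transpose -> conv -> transpose
print(out.shape)                                 # torch.Size([4, 1, 4]), the shape call() returns
```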
NICEMLPBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zi/czi6taqk3yywywfl3iwbejutxysbxi6hrg6s2rrrevzoemnmagnw.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4a/c4aftmtnqwr7hu6cf5jwqf3p4keti6kfufy6ubsztgw7benvzzji.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_7, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_2 = async_compile.triton('triton_poi_fused__weight_norm_interface_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/33/c33k5za2aswszitlq2wvmlb5z4qyqau47wey64xfk6mec52uns7s.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_6, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_7, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_3 = async_compile.triton('triton_poi_fused__weight_norm_interface_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf10, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_2.run(primals_7, buf4, 4, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_3.run(primals_7, primals_6, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf6, primals_5, buf9, 256, grid=grid(256), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf6, buf7, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf7, reinterpret_tensor(buf5, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_8
return (reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf5, primals_6, primals_7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf4, buf7, buf5, buf9, primals_4, buf10, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class NICEMLPBlock(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super(NICEMLPBlock, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, x):
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3(out)
return out
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'hidden_features': 4,
'activation': 'relu'}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_3(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf10, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__weight_norm_interface_2[grid(4)](primals_7, buf4,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_3[grid(16)](primals_7,
primals_6, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf6,
primals_5, buf9, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf6, buf7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf6, (64, 4), (4, 1), 0)
del buf6
extern_kernels.addmm(primals_8, buf7, reinterpret_tensor(buf5, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_8
return reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf5, primals_6, primals_7, reinterpret_tensor(primals_3, (64, 4
), (4, 1), 0), buf2, buf4, buf7, buf5, buf9, primals_4, buf10
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class NICEMLPBlockNew(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super(NICEMLPBlockNew, self).__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_8 = self.fc3.linear.bias
primals_6 = self.fc3.linear.weight_g
primals_7 = self.fc3.linear.weight_v
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| TRUMANCFY/wolf | NICEMLPBlock | false | 2,970 | [
"Apache-2.0"
] | 0 | 1a21479256e4f51885e2d2fdd449b1faa61277a6 | https://github.com/TRUMANCFY/wolf/tree/1a21479256e4f51885e2d2fdd449b1faa61277a6 | import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(self.
in_features, self.out_features, self.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
class Model(nn.Module):
def __init__(self, in_features, out_features, hidden_features, activation):
super().__init__()
assert activation in ['relu', 'elu', 'leaky_relu']
self.fc1 = nn.Linear(in_features, hidden_features, bias=True)
self.fc2 = nn.Linear(hidden_features, hidden_features, bias=True)
self.fc3 = LinearWeightNorm(hidden_features, out_features, bias=True)
if activation == 'relu':
self.activation = nn.ReLU(inplace=True)
elif activation == 'elu':
self.activation = nn.ELU(inplace=True)
else:
self.activation = nn.LeakyReLU(inplace=True, negative_slope=0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.fc1.bias, 0.0)
nn.init.constant_(self.fc2.bias, 0.0)
def forward(self, x):
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3(out)
return out
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self.activation(self.fc1(x))
out = self.activation(self.fc2(out))
out = self.fc3.init(out, init_scale=0.0 * init_scale)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'hidden_features': 4,
'activation': 'relu'}]
|
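The `_weight_norm_interface` kernels in the record above recompute the weight-norm reparameterization of `fc3` on each forward: one kernel takes the per-row L2 norm of `weight_v`, the next scales `weight_v` by `weight_g / norm`. A minimal eager-mode sketch of that identity; the layer here is a stand-in, not the record's module:

```python
import torch
import torch.nn as nn

linear = nn.utils.weight_norm(nn.Linear(4, 4))        # w = g * v / ||v||, per output row

with torch.no_grad():
    v = linear.weight_v                               # direction, shape (4, 4)
    g = linear.weight_g                               # magnitude, shape (4, 1)
    norm = v.pow(2).sum(dim=1, keepdim=True).sqrt()   # per-row L2 norm (kernel ..._2)
    w = v * (g / norm)                                # rescaled rows (kernel ..._3)
    assert torch.allclose(w, linear.weight, atol=1e-6)
```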
SummaryNet_large | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4m/c4myhxugnxr4tj5c6zrn3tgw5c6ujfrchuebz4dagnthuep25twe.py
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv1d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1], [2], [1], False, [0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 53) % 20
x2 = (xindex // 1060)
x3 = xindex % 1060
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + (1088*x2)), tmp4, xmask)
tl.store(out_ptr1 + (x3 + (1152*x2)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4o/c4ogr2pqpv24q35ktvntst5pcndq5babeeqet2pqt6xxv4azmatb.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets, getitem_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 5], [1, 5], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = (xindex // 10) % 20
x2 = (xindex // 200)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((5*x0) + (53*x1) + (1088*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (5*x0) + (53*x1) + (1088*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (5*x0) + (53*x1) + (1088*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (5*x0) + (53*x1) + (1088*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (4 + (5*x0) + (53*x1) + (1088*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp18 = tmp17 > tmp16
tmp19 = tl.full([1], 4, tl.int8)
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x3), tmp20, xmask)
tl.store(out_ptr1 + (x3), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jh/cjhhuvqv4jpwci3pjoye5g5d6rb6bmgur47aivil3d63wvznmwsr.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 51), (102, 51, 1))
assert_size_stride(primals_2, (20, 2, 3), (6, 3, 1))
assert_size_stride(primals_3, (20, ), (1, ))
assert_size_stride(primals_4, (8, 200), (200, 1))
assert_size_stride(primals_5, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(2,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 53), (1060, 53, 1))
buf1 = empty_strided_cuda((4, 20, 53), (1088, 53, 1), torch.float32)
buf7 = empty_strided_cuda((4, 20, 53), (1152, 53, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv1d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf0, primals_3, buf1, buf7, 4240, grid=grid(4240), stream=stream0)
del buf0
del primals_3
buf2 = empty_strided_cuda((4, 20, 1, 10), (200, 10, 10, 1), torch.int8)
buf3 = empty_strided_cuda((4, 20, 1, 10), (200, 10, 10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 800, grid=grid(800), stream=stream0)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (4, 200), (200, 1), 0), reinterpret_tensor(primals_4, (200, 8), (1, 200), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
buf6 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_5, buf6, 32, grid=grid(32), stream=stream0)
del primals_5
return (buf5, primals_2, primals_1, reinterpret_tensor(buf1, (4, 20, 1, 53), (1088, 53, 0, 1), 0), buf2, reinterpret_tensor(buf3, (4, 200), (200, 1), 0), buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 2, 51), (102, 51, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((20, 2, 3), (6, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((8, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SummaryNet_large(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv1d(in_channels=2, out_channels=20, kernel_size=
3, padding=2)
self.pool = nn.MaxPool1d(kernel_size=5, stride=5)
self.fc = nn.Linear(in_features=20 * 5 * 2, out_features=8)
def forward(self, x):
x = x.view(-1, 2, 51)
x = self.pool(F.relu(self.conv1(x)))
x = x.view(-1, 20 * 5 * 2)
x = F.relu(self.fc(x))
return x
def get_inputs():
return [torch.rand([4, 2, 51])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
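# Fused epilogue of conv1d: adds the per-channel bias, applies ReLU, and also
# stores the (activation <= 0) mask reused by the threshold backward pass.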
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 53 % 20
x2 = xindex // 1060
x3 = xindex % 1060
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3 + 1088 * x2), tmp4, xmask)
tl.store(out_ptr1 + (x3 + 1152 * x2), tmp6, xmask)
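# MaxPool1d(kernel_size=5, stride=5) over the length dimension, emitting the
# argmax index (int8) and the pooled value for each output position.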
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 10
x1 = xindex // 10 % 20
x2 = xindex // 200
x3 = xindex
tmp0 = tl.load(in_ptr0 + (5 * x0 + 53 * x1 + 1088 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 5 * x0 + 53 * x1 + 1088 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 5 * x0 + 53 * x1 + 1088 * x2), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 5 * x0 + 53 * x1 + 1088 * x2), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (4 + 5 * x0 + 53 * x1 + 1088 * x2), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp18 = tmp17 > tmp16
tmp19 = tl.full([1], 4, tl.int8)
tmp20 = tl.where(tmp18, tmp19, tmp15)
tmp21 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + x3, tmp20, xmask)
tl.store(out_ptr1 + x3, tmp21, xmask)
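# Fused epilogue of the fc matmul: in-place bias add + ReLU, plus the <= 0 mask
# kept for the backward threshold.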
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
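# Orchestrates the forward pass: conv1d -> fused bias+ReLU -> max-pool ->
# matmul (fc) -> fused bias+ReLU, returning the output together with the
# tensors saved for backward.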
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 2, 51), (102, 51, 1))
assert_size_stride(primals_2, (20, 2, 3), (6, 3, 1))
assert_size_stride(primals_3, (20,), (1,))
assert_size_stride(primals_4, (8, 200), (200, 1))
assert_size_stride(primals_5, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,),
padding=(2,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 20, 53), (1060, 53, 1))
buf1 = empty_strided_cuda((4, 20, 53), (1088, 53, 1), torch.float32)
buf7 = empty_strided_cuda((4, 20, 53), (1152, 53, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(4240)](buf0
, primals_3, buf1, buf7, 4240, XBLOCK=128, num_warps=4,
num_stages=1)
del buf0
del primals_3
buf2 = empty_strided_cuda((4, 20, 1, 10), (200, 10, 10, 1), torch.int8)
buf3 = empty_strided_cuda((4, 20, 1, 10), (200, 10, 10, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_1[grid(800)](buf1, buf2,
buf3, 800, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 200), (200, 1), 0),
reinterpret_tensor(primals_4, (200, 8), (1, 200), 0), out=buf4)
buf5 = buf4
del buf4
buf6 = empty_strided_cuda((4, 8), (8, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(32)](buf5,
primals_5, buf6, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_5
return buf5, primals_2, primals_1, reinterpret_tensor(buf1, (4, 20, 1,
53), (1088, 53, 0, 1), 0), buf2, reinterpret_tensor(buf3, (4, 200),
(200, 1), 0), buf6, primals_4, buf7
class SummaryNet_largeNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv1d(in_channels=2, out_channels=20, kernel_size=
3, padding=2)
self.pool = nn.MaxPool1d(kernel_size=5, stride=5)
self.fc = nn.Linear(in_features=20 * 5 * 2, out_features=8)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.fc.weight
primals_5 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Wrede/BNN-LFI | SummaryNet_large | false | 2,971 | [
"MIT"
] | 0 | 8c5094f01c1eef286bdd84613c7259d534d2eb7e | https://github.com/Wrede/BNN-LFI/tree/8c5094f01c1eef286bdd84613c7259d534d2eb7e | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv1d(in_channels=2, out_channels=20, kernel_size=
3, padding=2)
self.pool = nn.MaxPool1d(kernel_size=5, stride=5)
self.fc = nn.Linear(in_features=20 * 5 * 2, out_features=8)
def forward(self, x):
x = x.view(-1, 2, 51)
x = self.pool(F.relu(self.conv1(x)))
x = x.view(-1, 20 * 5 * 2)
x = F.relu(self.fc(x))
return x
def get_inputs():
return [torch.rand([4, 2, 51])]
def get_init_inputs():
return []
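# Minimal usage sketch (an assumption, not part of the original repo): conv1d
# takes length 51 -> 53 with padding=2, max-pool by 5 gives 10, flatten to 200,
# and the final linear layer maps to 8 summary features.
def example_usage():
    model = Model()
    x, = get_inputs()
    return model(x)  # expected shape: torch.Size([4, 8])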
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4e/c4efs56ymyev6yow4ruutakn3po5nni7rvtifmzxqreckdzecoje.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yl/cyl57twtgf3lzd5sst7snomgtzysir6mpvrzx6jm7k4lxpcq6sru.py
# Topologically Sorted Source Nodes: [x_3, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_3, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf4, primals_5, primals_1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_5
return (buf4, primals_2, primals_4, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv0 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, padding=1)
self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, padding=1)
def forward(self, x):
inputs = x
x = torch.relu(x)
x = self.conv0(x)
x = torch.relu(x)
x = self.conv1(x)
return x + inputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
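# Elementwise ReLU applied to the block input before the first convolution.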
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
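# In-place epilogue of conv0: per-channel bias add followed by ReLU.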
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
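# In-place epilogue of conv1: per-channel bias add plus the residual (skip) add.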
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
triton_poi_fused_add_convolution_2[grid(256)](buf4, primals_5,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_5
return buf4, primals_2, primals_4, buf0, buf2
class ResidualBlockNew(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv0 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, padding=1)
self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, padding=1)
def forward(self, input_0):
primals_2 = self.conv0.weight
primals_3 = self.conv0.bias
primals_4 = self.conv1.weight
primals_5 = self.conv1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| Thaigun/Griddly | ResidualBlock | false | 2,972 | [
"MIT"
] | 0 | de5972a608a2928172510a0ac81a977c48af6b1f | https://github.com/Thaigun/Griddly/tree/de5972a608a2928172510a0ac81a977c48af6b1f | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, channels):
super().__init__()
self.conv0 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, padding=1)
self.conv1 = nn.Conv2d(in_channels=channels, out_channels=channels,
kernel_size=3, padding=1)
def forward(self, x):
inputs = x
x = torch.relu(x)
x = self.conv0(x)
x = torch.relu(x)
x = self.conv1(x)
return x + inputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
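# Minimal usage sketch (an assumption, not part of the original repo): the 3x3
# convolutions use padding=1 and a skip add, so input and output shapes match.
def example_usage():
    block = Model(*get_init_inputs())
    x, = get_inputs()
    return block(x)  # expected shape: torch.Size([4, 4, 4, 4])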
|
FPNOutput | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
return (buf2, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvBNReLU(nn.Module):
def __init__(self, in_chan, out_chan, ks=1, stride=1, padding=0,
norm_layer=None, bias=True, *args, **kwargs):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=
stride, padding=padding, bias=bias)
self.norm_layer = norm_layer
if norm_layer is not None:
self.bn = norm_layer(out_chan, activation='leaky_relu')
self.init_weight()
def forward(self, x):
x = self.conv(x)
if self.norm_layer is not None:
x = self.bn(x)
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if ly.bias is not None:
nn.init.constant_(ly.bias, 0)
class FPNOutput(nn.Module):
def __init__(self, in_chan, mid_chan, n_classes, norm_layer=None, *args,
**kwargs):
super(FPNOutput, self).__init__()
self.norm_layer = norm_layer
self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1,
norm_layer=norm_layer)
self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=
False)
self.init_weight()
def forward(self, x):
x = self.conv(x)
x = self.conv_out(x)
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if ly.bias is not None:
nn.init.constant_(ly.bias, 0)
def get_params(self):
wd_params, nowd_params = [], []
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
if module.bias is not None:
nowd_params.append(module.bias)
elif isinstance(module, self.norm_layer):
nowd_params += list(module.parameters())
return wd_params, nowd_params
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_chan': 4, 'mid_chan': 4, 'n_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
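# In-place per-channel bias add for the 3x3 conv; the 1x1 conv_out is bias-free.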
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_2, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
return buf2, primals_1, primals_3, primals_4, buf1
class ConvBNReLU(nn.Module):
def __init__(self, in_chan, out_chan, ks=1, stride=1, padding=0,
norm_layer=None, bias=True, *args, **kwargs):
super(ConvBNReLU, self).__init__()
self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=
stride, padding=padding, bias=bias)
self.norm_layer = norm_layer
if norm_layer is not None:
self.bn = norm_layer(out_chan, activation='leaky_relu')
self.init_weight()
def forward(self, x):
x = self.conv(x)
if self.norm_layer is not None:
x = self.bn(x)
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if ly.bias is not None:
nn.init.constant_(ly.bias, 0)
class FPNOutputNew(nn.Module):
def __init__(self, in_chan, mid_chan, n_classes, norm_layer=None, *args,
**kwargs):
super(FPNOutputNew, self).__init__()
self.norm_layer = norm_layer
self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1,
norm_layer=norm_layer)
self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=
False)
self.init_weight()
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if ly.bias is not None:
nn.init.constant_(ly.bias, 0)
def get_params(self):
wd_params, nowd_params = [], []
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
if module.bias is not None:
nowd_params.append(module.bias)
elif isinstance(module, self.norm_layer):
nowd_params += list(module.parameters())
return wd_params, nowd_params
def forward(self, input_0):
primals_1 = self.conv.conv.weight
primals_2 = self.conv.conv.bias
primals_4 = self.conv_out.weight
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Xlinford/TDNet | FPNOutput | false | 2,974 | [
"MIT"
] | 0 | e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | https://github.com/Xlinford/TDNet/tree/e7cb59c40b8751b6dab9691d26ad224fd61c24d1 | import torch
import torch.nn as nn
class ConvBNReLU(nn.Module):
def __init__(self, in_chan, out_chan, ks=1, stride=1, padding=0,
norm_layer=None, bias=True, *args, **kwargs):
super().__init__()
self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=
stride, padding=padding, bias=bias)
self.norm_layer = norm_layer
if norm_layer is not None:
self.bn = norm_layer(out_chan, activation='leaky_relu')
self.init_weight()
def forward(self, x):
x = self.conv(x)
if self.norm_layer is not None:
x = self.bn(x)
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if ly.bias is not None:
nn.init.constant_(ly.bias, 0)
class Model(nn.Module):
def __init__(self, in_chan, mid_chan, n_classes, norm_layer=None, *args,
**kwargs):
super().__init__()
self.norm_layer = norm_layer
self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1,
norm_layer=norm_layer)
self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=
False)
self.init_weight()
def forward(self, x):
x = self.conv(x)
x = self.conv_out(x)
return x
def init_weight(self):
for ly in self.children():
if isinstance(ly, nn.Conv2d):
nn.init.kaiming_normal_(ly.weight, a=1)
if ly.bias is not None:
nn.init.constant_(ly.bias, 0)
def get_params(self):
wd_params, nowd_params = [], []
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
if module.bias is not None:
nowd_params.append(module.bias)
elif isinstance(module, self.norm_layer):
nowd_params += list(module.parameters())
return wd_params, nowd_params
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
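# Minimal usage sketch (an assumption, not part of the original repo): a 3x3
# conv to mid_chan followed by a bias-free 1x1 conv to n_classes keeps the
# spatial size unchanged.
def example_usage():
    head = Model(*get_init_inputs())
    x, = get_inputs()
    return head(x)  # expected shape: torch.Size([4, 4, 4, 4])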
|
Net_Tran | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ak/cakyigvxou5hbdyaetmjyb652hrwk5nkaquv4ojwt7jcqafi2kpv.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2z/c2zxupn5747aea7fisufidvz6gdax3vmue4tm3yipiw2muh3z4u2.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [2, 2], [1, 1], [1, 1], True, [1, 1], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (8, ), (1, ))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_9, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 65536, grid=grid(65536), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 131072, grid=grid(131072), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_0.run(buf5, primals_7, 65536, grid=grid(65536), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=True, output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf6, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf7, primals_9, 131072, grid=grid(131072), stream=stream0)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((16, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((16, 8, 3, 3), (72, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Net_Tran(nn.Module):
def __init__(self):
super(Net_Tran, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3, stride=2, padding=1)
self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, output_padding=1)
self.conv2 = nn.Conv2d(8, 16, 3, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(16, 8, 3, 2, 1, output_padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.deconv1(out)
out = self.conv2(out)
out = self.deconv2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
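# The two elementwise kernels above fold the conv/deconv bias add into an
# in-place pass over the extern convolution outputs: `xindex // (H*W) % C`
# recovers the channel index (1024 = 32*32 for the 16-channel conv outputs,
# 4096 = 64*64 for the 8-channel deconv outputs), and the per-channel bias is
# broadcast across each spatial plane.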
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (16, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (16,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_5, (8,), (1,))
assert_size_stride(primals_6, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (16, 8, 3, 3), (72, 9, 3, 1))
assert_size_stride(primals_9, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(65536)](buf1, primals_2, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(131072)](buf3, primals_5,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 32, 32), (16384, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_0[grid(65536)](buf5, primals_7, 65536,
XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=True,
output_padding=(1, 1), groups=1, bias=None)
assert_size_stride(buf6, (4, 8, 64, 64), (32768, 4096, 64, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_1[grid(131072)](buf7, primals_9,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5)
class Net_TranNew(nn.Module):
def __init__(self):
super(Net_TranNew, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3, stride=2, padding=1)
self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, output_padding=1)
self.conv2 = nn.Conv2d(8, 16, 3, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(16, 8, 3, 2, 1, output_padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.deconv1.weight
primals_5 = self.deconv1.bias
primals_6 = self.conv2.weight
primals_7 = self.conv2.bias
primals_8 = self.deconv2.weight
primals_9 = self.deconv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| YibinXie/Pose_Estimation | Net_Tran | false | 2,975 | [
"MIT"
] | 0 | 5849140bf842bf3aeaad75827f5e7b7f2999c9ee | https://github.com/YibinXie/Pose_Estimation/tree/5849140bf842bf3aeaad75827f5e7b7f2999c9ee | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3, stride=2, padding=1)
self.deconv1 = nn.ConvTranspose2d(16, 8, 3, 2, 1, output_padding=1)
self.conv2 = nn.Conv2d(8, 16, 3, stride=2, padding=1)
self.deconv2 = nn.ConvTranspose2d(16, 8, 3, 2, 1, output_padding=1)
def forward(self, x):
out = self.conv1(x)
out = self.deconv1(out)
out = self.conv2(out)
out = self.deconv2(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
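# Illustrative sanity check (an addition, not part of the original repo): each
# stride-2 conv halves the 64x64 input and each stride-2 transposed conv with
# output_padding=1 restores it, so the ladder ends back at the input resolution.
if __name__ == "__main__":
    _y = Model()(torch.rand(4, 3, 64, 64))
    assert _y.shape == (4, 8, 64, 64)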
|
FlatCat | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/w5/cw5lgerkhn6xwjivul47up3nhmkyy673i3rrzz3llfepayid7qp3.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%view, %view_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 128
x1 = (xindex // 128)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((64*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 128, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((64*x1) + ((-64) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, buf0, 512, grid=grid(512), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class FlatCat(torch.nn.Module):
def __init__(self):
super(FlatCat, self).__init__()
def forward(self, x, y):
x = x.view(x.shape[0], -1, 1, 1)
y = y.view(y.shape[0], -1, 1, 1)
return torch.cat([x, y], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 128
x1 = xindex // 128
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (64 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp9 = tl.load(in_ptr1 + (64 * x1 + (-64 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
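# The kernel above implements the channel-wise cat of the two flattened inputs:
# x0 = xindex % 128 is the output channel and x1 = xindex // 128 the batch index;
# channels below 64 are read from the first input and channels in [64, 128)
# from the second, with tl.where selecting between the two masked loads.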
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 128, 1, 1), (128, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](arg0_1, arg1_1, buf0, 512, XBLOCK
=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class FlatCatNew(torch.nn.Module):
def __init__(self):
super(FlatCatNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| YirongMao/torch2trt | FlatCat | false | 2,976 | [
"MIT"
] | 0 | 7635051998a9cd6b9483da1569814031c04a1b52 | https://github.com/YirongMao/torch2trt/tree/7635051998a9cd6b9483da1569814031c04a1b52 | import torch
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
x = x.view(x.shape[0], -1, 1, 1)
y = y.view(y.shape[0], -1, 1, 1)
return torch.cat([x, y], 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
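# Illustrative sanity check (an addition, not part of the original repo): each
# (4, 4, 4, 4) input flattens to (4, 64, 1, 1), so the channel-wise cat yields
# (4, 128, 1, 1), matching the buffer shape allocated in the generated wrapper.
if __name__ == "__main__":
    _out = Model()(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert _out.shape == (4, 128, 1, 1)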
|
MagnitudeTestModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bp/cbpsi7uwlydcd7y2kls25pvhwekyxrjudqkrogdjrqus4cqarq6e.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 31752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3969) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gm/cgmelkfs4tb5oft7i3x4t4vlbq6bn4nda6r26q3bzcbfecw42lmb.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 14884
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 63, 63), (7938, 3969, 63, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 31752, grid=grid(31752), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 61, 61), (3721, 3721, 61, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 14884, grid=grid(14884), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 2, 3, 3), (18, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
def fill_bias(module, value):
module.bias.data.fill_(value)
def fill_conv_weight(conv, value):
conv.weight.data.fill_(value)
with torch.no_grad():
mask = torch.eye(conv.kernel_size[0])
conv.weight += mask
def create_conv(in_channels, out_channels, kernel_size, weight_init, bias_init
):
conv = nn.Conv2d(in_channels, out_channels, kernel_size)
fill_conv_weight(conv, weight_init)
fill_bias(conv, bias_init)
return conv
class conv(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(conv, self).__init__()
self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(5, 5, 3), stride=(stride, stride, 1),
padding=(2, 2, 1), bias=False, groups=groups)
def forward(self, x):
out = x
out = self.conv1(out)
return out
class MagnitudeTestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = create_conv(1, 2, 2, 9, -2)
self.conv2 = create_conv(2, 1, 3, -10, 0)
def forward(self, x):
return self.conv2(self.conv1(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 31752
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3969 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 14884
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
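# Both kernels above fold the convolution bias into an in-place elementwise pass
# over the conv output: the first broadcasts the 2-element bias of conv1 over
# (4, 2, 63, 63) via `xindex // 3969 % 2`, the second broadcasts the single
# scalar bias of conv2 over the whole (4, 1, 61, 61) output.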
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (1, 2, 3, 3), (18, 9, 3, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 2, 63, 63), (7938, 3969, 63, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(31752)](buf1, primals_2, 31752,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 1, 61, 61), (3721, 3721, 61, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(14884)](buf3, primals_5, 14884,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
def fill_bias(module, value):
module.bias.data.fill_(value)
def fill_conv_weight(conv, value):
conv.weight.data.fill_(value)
with torch.no_grad():
mask = torch.eye(conv.kernel_size[0])
conv.weight += mask
def create_conv(in_channels, out_channels, kernel_size, weight_init, bias_init
):
conv = nn.Conv2d(in_channels, out_channels, kernel_size)
fill_conv_weight(conv, weight_init)
fill_bias(conv, bias_init)
return conv
class conv(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(conv, self).__init__()
self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(5, 5, 3), stride=(stride, stride, 1),
padding=(2, 2, 1), bias=False, groups=groups)
def forward(self, x):
out = x
out = self.conv1(out)
return out
class MagnitudeTestModelNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = create_conv(1, 2, 2, 9, -2)
self.conv2 = create_conv(2, 1, 3, -10, 0)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| JinYAnGHe/openvino_training_extensions | MagnitudeTestModel | false | 2,977 | [
"Apache-2.0"
] | 0 | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
def fill_bias(module, value):
module.bias.data.fill_(value)
def fill_conv_weight(conv, value):
conv.weight.data.fill_(value)
with torch.no_grad():
mask = torch.eye(conv.kernel_size[0])
conv.weight += mask
def create_conv(in_channels, out_channels, kernel_size, weight_init, bias_init
):
conv = nn.Conv2d(in_channels, out_channels, kernel_size)
fill_conv_weight(conv, weight_init)
fill_bias(conv, bias_init)
return conv
class conv(nn.Module):
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super().__init__()
self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=
out_channels, kernel_size=(5, 5, 3), stride=(stride, stride, 1),
padding=(2, 2, 1), bias=False, groups=groups)
def forward(self, x):
out = x
out = self.conv1(out)
return out
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = create_conv(1, 2, 2, 9, -2)
self.conv2 = create_conv(2, 1, 3, -10, 0)
def forward(self, x):
return self.conv2(self.conv1(x))
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return []
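# Illustrative sanity check (an addition, not part of the original repo): with
# no padding, the 2x2 conv maps 64 -> 63 and the 3x3 conv maps 63 -> 61, which
# is exactly the (4, 1, 61, 61) shape asserted by the generated wrapper above.
if __name__ == "__main__":
    _y = Model()(torch.rand(4, 1, 64, 64))
    assert _y.shape == (4, 1, 61, 61)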
|
ResidualBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wj/cwjdpyw3b2vvyd2ut7hme3me2ikgp27orbvtzdfgd2ptb2uf263d.py
# Topologically Sorted Source Nodes: [out_2, relu_1, add], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# out_2 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_1, %primals_3), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2, relu_1, add], Original ATen: [aten.convolution, aten.relu, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_relu_threshold_backward_1.run(buf2, primals_5, primals_3, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
class ResidualBlock(nn.Module):
def __init__(self, in_channels, hidden, out_channels):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=hidden,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=hidden, out_channels=
out_channels, kernel_size=3, stride=1, padding=1)
self.relu2 = nn.ReLU()
def forward(self, x):
out = self.conv1(x)
out = self.relu1(out)
out = self.conv2(out)
return self.relu2(out) + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'hidden': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = tmp4 <= tmp7
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
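# The kernel above fuses the tail of the residual block: it adds the conv2 bias,
# applies ReLU, adds the residual input (out_ptr0 = relu(conv2_out + bias) + x),
# and stores the `relu_output <= 0` mask in out_ptr1 for the threshold backward.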
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_convolution_relu_threshold_backward_1[grid(256)](
buf2, primals_5, primals_3, buf3, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class ResidualBlockNew(nn.Module):
def __init__(self, in_channels, hidden, out_channels):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=hidden,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=hidden, out_channels=
out_channels, kernel_size=3, stride=1, padding=1)
self.relu2 = nn.ReLU()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| YigitGunduc/self-driving-car | ResidualBlock | false | 2,978 | [
"MIT"
] | 0 | 2be31f6473c911cf004236ce0874cb2da8fe8ad1 | https://github.com/YigitGunduc/self-driving-car/tree/2be31f6473c911cf004236ce0874cb2da8fe8ad1 | import torch
import torch.utils.data
from torch import nn
class Model(nn.Module):
def __init__(self, in_channels, hidden, out_channels):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=hidden,
kernel_size=3, stride=1, padding=1)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=hidden, out_channels=
out_channels, kernel_size=3, stride=1, padding=1)
self.relu2 = nn.ReLU()
def forward(self, x):
out = self.conv1(x)
out = self.relu1(out)
out = self.conv2(out)
return self.relu2(out) + x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
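# Illustrative sanity check (an addition, not part of the original repo): both
# 3x3 convs use padding=1, so the spatial size is preserved and the residual
# add relu(conv2(relu(conv1(x)))) + x is shape-compatible with the input.
if __name__ == "__main__":
    _x = torch.rand(4, 4, 4, 4)
    assert Model(4, 4, 4)(_x).shape == _x.shape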
|
FusedLeakyReLU | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cx/ccxnongcgusvhvf5whrpjbz4ddnlsjovjzjngbpjfxvhdw7ihrhu.py
# Topologically Sorted Source Nodes: [add, leaky_relu, mul], Original ATen: [aten.add, aten.leaky_relu, aten.mul]
# Source node to ATen node mapping:
# add => add
# leaky_relu => gt, mul, where
# mul => mul_1
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %view), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.4142135623730951), kwargs = {})
triton_poi_fused_add_leaky_relu_mul_0 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 1.4142135623730951
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, leaky_relu, mul], Original ATen: [aten.add, aten.leaky_relu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_0.run(primals_2, primals_1, buf0, buf1, 256, grid=grid(256), stream=stream0)
del primals_1
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
input = input
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = 1.4142135623730951
tmp9 = tmp7 * tmp8
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp9, xmask)
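# The kernel above computes leaky_relu(x + bias, 0.2) * sqrt(2) in a single pass:
# out_ptr1 receives the scaled activation and out_ptr0 the `x + bias > 0` mask,
# which the wrapper returns alongside the output for use in the backward pass.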
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_0[grid(256)](primals_2,
primals_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
return buf1, buf0
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
input = input
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
class FusedLeakyReLUNew(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input_0):
primals_1 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| YotamNitzan/pixel2style2pixel | FusedLeakyReLU | false | 2,979 | [
"MIT"
] | 0 | b943f9e6de046a54b901eea1d8714cb02a71605f | https://github.com/YotamNitzan/pixel2style2pixel/tree/b943f9e6de046a54b901eea1d8714cb02a71605f | import torch
from torch import nn
from torch.nn import functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
input = input
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
class Model(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
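# Illustrative sanity check (an addition, not part of the original repo): the
# fused op is just bias-add -> leaky ReLU (slope 0.2) -> scale by sqrt(2), so it
# should match a direct composition of those steps.
if __name__ == "__main__":
    _x, _b = torch.rand(4, 4, 4, 4), torch.randn(4)
    _ref = F.leaky_relu(_x + _b.view(1, -1, 1, 1), negative_slope=0.2) * 2 ** 0.5
    assert torch.allclose(fused_leaky_relu(_x, _b), _ref)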
|
GAT | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rn/crnvqrppxb3ocltksxzamskmx3mpvscqlqbanvhmgkmj5b53kfuf.py
# Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# e => add
# e_1 => gt
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %permute), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
triton_poi_fused_add_leaky_relu_0 = async_compile.triton('triton_poi_fused_add_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
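# Note (per the graph fragment above): this kernel only produces the boolean branch mask
# for the leaky_relu. It forms e = Wh1 + Wh2.T (a column vector plus a row vector,
# broadcast to 4x4) and stores e > 0; later kernels reuse this mask to pick between
# e and alpha * e without recomputing the comparison.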
# kernel path: runs/run_shard_7/inductor_cache/k2/ck2qvrsga6qzh3n4zrcqhgn3gcw55gg5hx55rqebdhhoptcty66e.py
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
# Source node to ATen node mapping:
# gt => gt_1
# Graph fragment:
# %gt_1 : [num_users=6] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_4, 0), kwargs = {})
triton_poi_fused_gt_1 = async_compile.triton('triton_poi_fused_gt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gt_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
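# adj > 0 is computed once here and reused as the shared adjacency mask for all four
# attention heads and for the output head (gt_1 has num_users=6 in the graph above).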
# kernel path: runs/run_shard_7/inductor_cache/7p/c7pzocdj4z66742c4x73l2mmqga3yo74menkparuudjxrge7crrh.py
# Topologically Sorted Source Nodes: [e, e_1, zero_vec, attention, attention_1, e_2, e_3, attention_3, attention_4, e_4, e_5, attention_6, attention_7, e_6, e_7, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => amax
# attention_10 => amax_3
# attention_3 => where_4
# attention_4 => amax_1
# attention_6 => where_7
# attention_7 => amax_2
# attention_9 => where_10
# e => add
# e_1 => mul, where
# e_2 => add_1
# e_3 => mul_5, where_3
# e_4 => add_2
# e_5 => mul_10, where_6
# e_6 => add_3
# e_7 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %permute), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_1, [1], True), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %permute_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %add_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %amax_1 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_4, [1], True), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_9, %permute_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %add_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %amax_2 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_7, [1], True), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_13, %permute_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %amax_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_10, [1], True), kwargs = {})
triton_poi_fused__softmax_add_leaky_relu_mul_where_2 = async_compile.triton('triton_poi_fused__softmax_add_leaky_relu_mul_where_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: '*fp32', 7: '*i1', 8: '*fp32', 9: '*fp32', 10: '*i1', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_leaky_relu_mul_where_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 40, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tl.load(in_ptr3 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr3 + (1))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp22 = tl.load(in_ptr3 + (2))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp29 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp31 = tl.load(in_ptr3 + (3))
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp38 = tl.load(in_ptr4 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr5 + (x0), xmask)
tmp40 = tl.load(in_ptr6 + (0))
tmp41 = tl.broadcast_to(tmp40, [XBLOCK])
tmp46 = tl.load(in_ptr4 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp47 = tl.load(in_ptr6 + (1))
tmp48 = tl.broadcast_to(tmp47, [XBLOCK])
tmp54 = tl.load(in_ptr4 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp55 = tl.load(in_ptr6 + (2))
tmp56 = tl.broadcast_to(tmp55, [XBLOCK])
tmp62 = tl.load(in_ptr4 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp63 = tl.load(in_ptr6 + (3))
tmp64 = tl.broadcast_to(tmp63, [XBLOCK])
tmp70 = tl.load(in_ptr7 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp71 = tl.load(in_ptr8 + (x0), xmask)
tmp72 = tl.load(in_ptr9 + (0))
tmp73 = tl.broadcast_to(tmp72, [XBLOCK])
tmp78 = tl.load(in_ptr7 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp79 = tl.load(in_ptr9 + (1))
tmp80 = tl.broadcast_to(tmp79, [XBLOCK])
tmp86 = tl.load(in_ptr7 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp87 = tl.load(in_ptr9 + (2))
tmp88 = tl.broadcast_to(tmp87, [XBLOCK])
tmp94 = tl.load(in_ptr7 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp95 = tl.load(in_ptr9 + (3))
tmp96 = tl.broadcast_to(tmp95, [XBLOCK])
tmp102 = tl.load(in_ptr10 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp103 = tl.load(in_ptr11 + (x0), xmask)
tmp104 = tl.load(in_ptr12 + (0))
tmp105 = tl.broadcast_to(tmp104, [XBLOCK])
tmp110 = tl.load(in_ptr10 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp111 = tl.load(in_ptr12 + (1))
tmp112 = tl.broadcast_to(tmp111, [XBLOCK])
tmp118 = tl.load(in_ptr10 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp119 = tl.load(in_ptr12 + (2))
tmp120 = tl.broadcast_to(tmp119, [XBLOCK])
tmp126 = tl.load(in_ptr10 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp127 = tl.load(in_ptr12 + (3))
tmp128 = tl.broadcast_to(tmp127, [XBLOCK])
tmp5 = tmp2 + tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.where(tmp1, tmp5, tmp7)
tmp9 = -8999999815811072.0
tmp10 = tl.where(tmp0, tmp8, tmp9)
tmp15 = tmp2 + tmp14
tmp16 = tmp15 * tmp6
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = tl.where(tmp11, tmp17, tmp9)
tmp19 = triton_helpers.maximum(tmp10, tmp18)
tmp24 = tmp2 + tmp23
tmp25 = tmp24 * tmp6
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp20, tmp26, tmp9)
tmp28 = triton_helpers.maximum(tmp19, tmp27)
tmp33 = tmp2 + tmp32
tmp34 = tmp33 * tmp6
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp35, tmp9)
tmp37 = triton_helpers.maximum(tmp28, tmp36)
tmp42 = tmp39 + tmp41
tmp43 = tmp42 * tmp6
tmp44 = tl.where(tmp38, tmp42, tmp43)
tmp45 = tl.where(tmp0, tmp44, tmp9)
tmp49 = tmp39 + tmp48
tmp50 = tmp49 * tmp6
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tl.where(tmp11, tmp51, tmp9)
tmp53 = triton_helpers.maximum(tmp45, tmp52)
tmp57 = tmp39 + tmp56
tmp58 = tmp57 * tmp6
tmp59 = tl.where(tmp54, tmp57, tmp58)
tmp60 = tl.where(tmp20, tmp59, tmp9)
tmp61 = triton_helpers.maximum(tmp53, tmp60)
tmp65 = tmp39 + tmp64
tmp66 = tmp65 * tmp6
tmp67 = tl.where(tmp62, tmp65, tmp66)
tmp68 = tl.where(tmp29, tmp67, tmp9)
tmp69 = triton_helpers.maximum(tmp61, tmp68)
tmp74 = tmp71 + tmp73
tmp75 = tmp74 * tmp6
tmp76 = tl.where(tmp70, tmp74, tmp75)
tmp77 = tl.where(tmp0, tmp76, tmp9)
tmp81 = tmp71 + tmp80
tmp82 = tmp81 * tmp6
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tl.where(tmp11, tmp83, tmp9)
tmp85 = triton_helpers.maximum(tmp77, tmp84)
tmp89 = tmp71 + tmp88
tmp90 = tmp89 * tmp6
tmp91 = tl.where(tmp86, tmp89, tmp90)
tmp92 = tl.where(tmp20, tmp91, tmp9)
tmp93 = triton_helpers.maximum(tmp85, tmp92)
tmp97 = tmp71 + tmp96
tmp98 = tmp97 * tmp6
tmp99 = tl.where(tmp94, tmp97, tmp98)
tmp100 = tl.where(tmp29, tmp99, tmp9)
tmp101 = triton_helpers.maximum(tmp93, tmp100)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 * tmp6
tmp108 = tl.where(tmp102, tmp106, tmp107)
tmp109 = tl.where(tmp0, tmp108, tmp9)
tmp113 = tmp103 + tmp112
tmp114 = tmp113 * tmp6
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.where(tmp11, tmp115, tmp9)
tmp117 = triton_helpers.maximum(tmp109, tmp116)
tmp121 = tmp103 + tmp120
tmp122 = tmp121 * tmp6
tmp123 = tl.where(tmp118, tmp121, tmp122)
tmp124 = tl.where(tmp20, tmp123, tmp9)
tmp125 = triton_helpers.maximum(tmp117, tmp124)
tmp129 = tmp103 + tmp128
tmp130 = tmp129 * tmp6
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.where(tmp29, tmp131, tmp9)
tmp133 = triton_helpers.maximum(tmp125, tmp132)
tl.store(out_ptr0 + (x0), tmp37, xmask)
tl.store(out_ptr1 + (x0), tmp69, xmask)
tl.store(out_ptr2 + (x0), tmp101, xmask)
tl.store(out_ptr3 + (x0), tmp133, xmask)
''', device_str='cuda')
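# First softmax stage, fused across all four attention heads: for each row this kernel
# rebuilds leakyrelu(Wh1 + Wh2.T) with slope 4, masks non-edges (adj <= 0) to roughly
# -9e15, and takes the per-row maximum (amax). The next kernel subtracts this maximum
# for a numerically stable softmax.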
# kernel path: runs/run_shard_7/inductor_cache/si/csiextwu44e63m7askimz3girudboqtyp45f2wu2wmll5iovqchv.py
# Topologically Sorted Source Nodes: [e, e_1, zero_vec, attention, attention_1, e_2, e_3, attention_3, attention_4, e_4, e_5, attention_6, attention_7, e_6, e_7, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention => where_1
# attention_1 => exp, sub
# attention_10 => exp_3, sub_3
# attention_3 => where_4
# attention_4 => exp_1, sub_1
# attention_6 => where_7
# attention_7 => exp_2, sub_2
# attention_9 => where_10
# e => add
# e_1 => mul, where
# e_2 => add_1
# e_3 => mul_5, where_3
# e_4 => add_2
# e_5 => mul_10, where_6
# e_6 => add_3
# e_7 => mul_15, where_9
# zero_vec => full_default
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_1, %permute), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul), kwargs = {})
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where, %full_default), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %add_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_5, %permute_1), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 4), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %add_1, %mul_5), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_3, %full_default), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_4, %amax_1), kwargs = {})
# %exp_1 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_9, %permute_2), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 4), kwargs = {})
# %where_6 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %add_2, %mul_10), kwargs = {})
# %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_6, %full_default), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_7, %amax_2), kwargs = {})
# %exp_2 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_13, %permute_3), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 4), kwargs = {})
# %where_9 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_3, %mul_15), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_9, %full_default), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_10, %amax_3), kwargs = {})
# %exp_3 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_3,), kwargs = {})
triton_poi_fused__softmax_add_leaky_relu_mul_where_3 = async_compile.triton('triton_poi_fused__softmax_add_leaky_relu_mul_where_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*i1', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*i1', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_leaky_relu_mul_where_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x2), xmask).to(tl.int1)
tmp14 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr7 + (x0), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr9 + (x2), xmask).to(tl.int1)
tmp24 = tl.load(in_ptr10 + (x1), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr11 + (x0), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr12 + (x1), xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr13 + (x2), xmask).to(tl.int1)
tmp34 = tl.load(in_ptr14 + (x1), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr15 + (x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr16 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.where(tmp1, tmp4, tmp6)
tmp8 = -8999999815811072.0
tmp9 = tl.where(tmp0, tmp7, tmp8)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 * tmp5
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tl.where(tmp0, tmp18, tmp8)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp5
tmp28 = tl.where(tmp23, tmp26, tmp27)
tmp29 = tl.where(tmp0, tmp28, tmp8)
tmp31 = tmp29 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp5
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.where(tmp0, tmp38, tmp8)
tmp41 = tmp39 - tmp40
tmp42 = tl_math.exp(tmp41)
tl.store(out_ptr0 + (x2), tmp12, xmask)
tl.store(out_ptr1 + (x2), tmp22, xmask)
tl.store(out_ptr2 + (x2), tmp32, xmask)
tl.store(out_ptr3 + (x2), tmp42, xmask)
''', device_str='cuda')
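# Second softmax stage, again batched over the four heads: each masked logit has its row
# maximum subtracted and is exponentiated, i.e. exp(x - amax). In eager terms this is the
# inner part of F.softmax(attention, dim=1); the normalization happens in the next kernel.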
# kernel path: runs/run_shard_7/inductor_cache/rr/crrmj7r54x5uk325xkhuskxp4m5prz3fpx53yc2st4o5pwbhq32p.py
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# attention_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
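# Final softmax stage: divide each exponentiated entry by its row sum. The same kernel is
# launched once per head's attention matrix, and later reused for the output head.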
# kernel path: runs/run_shard_7/inductor_cache/xp/cxpnlviefwxbdj7cbio4oqhkzb74qnjn5guhdplnmdcsr7cnbsyp.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_2, %where_5, %where_8, %where_11], 1), kwargs = {})
triton_poi_fused_cat_5 = async_compile.triton('triton_poi_fused_cat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp30 & xmask, eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tmp40 = tl.full([1], 16, tl.int64)
tmp41 = tmp0 < tmp40
tmp42 = tl.load(in_ptr3 + ((4*x1) + ((-12) + x0)), tmp39 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + (x2), tmp52, xmask)
''', device_str='cuda')
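# This kernel realizes torch.cat([F.elu(h_prime_i) for each head], dim=1): depending on
# which 4-column slice of the 4x16 output a lane falls in, it reads the matching head's
# h_prime and applies ELU via expm1 (x if x > 0 else expm1(x)), matching F.elu's default
# alpha of 1.0.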
# kernel path: runs/run_shard_7/inductor_cache/g2/cg2m5jpgsmsheozlj7op7zoco4x7nplswmyzh66rmfnns22uywtb.py
# Topologically Sorted Source Nodes: [zero_vec, e_8, e_9, attention_12, attention_13], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention_12 => where_13
# attention_13 => amax_4
# e_8 => add_4
# e_9 => mul_20, where_12
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_17, %permute_4), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 4), kwargs = {})
# %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %add_4, %mul_20), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_12, %full_default), kwargs = {})
# %amax_4 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_13, [1], True), kwargs = {})
triton_poi_fused__softmax_add_leaky_relu_mul_where_6 = async_compile.triton('triton_poi_fused__softmax_add_leaky_relu_mul_where_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_leaky_relu_mul_where_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 13, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x0), xmask)
tmp3 = tl.load(in_ptr3 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp13 = tl.load(in_ptr3 + (1))
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp22 = tl.load(in_ptr3 + (2))
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp29 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp31 = tl.load(in_ptr3 + (3))
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp5 = tmp2 + tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.where(tmp1, tmp5, tmp7)
tmp9 = -8999999815811072.0
tmp10 = tl.where(tmp0, tmp8, tmp9)
tmp15 = tmp2 + tmp14
tmp16 = tmp15 * tmp6
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = tl.where(tmp11, tmp17, tmp9)
tmp19 = triton_helpers.maximum(tmp10, tmp18)
tmp24 = tmp2 + tmp23
tmp25 = tmp24 * tmp6
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp20, tmp26, tmp9)
tmp28 = triton_helpers.maximum(tmp19, tmp27)
tmp33 = tmp2 + tmp32
tmp34 = tmp33 * tmp6
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp35, tmp9)
tmp37 = triton_helpers.maximum(tmp28, tmp36)
tl.store(out_ptr0 + (x0), tmp37, xmask)
''', device_str='cuda')
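# Same per-row maximum (amax) step as the earlier softmax kernels, but for the single
# output attention head that operates on the concatenated multi-head features.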
# kernel path: runs/run_shard_7/inductor_cache/3p/c3pw4zgk6qf2aose2yqbculke53py2ghtcozhat76besgjeo2j6h.py
# Topologically Sorted Source Nodes: [zero_vec, e_8, e_9, attention_12, attention_13], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.where, aten._softmax]
# Source node to ATen node mapping:
# attention_12 => where_13
# attention_13 => exp_4, sub_4
# e_8 => add_4
# e_9 => mul_20, where_12
# zero_vec => full_default
# Graph fragment:
# %full_default : [num_users=5] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], -8999999815811072.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_17, %permute_4), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 4), kwargs = {})
# %where_12 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %add_4, %mul_20), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %where_12, %full_default), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_13, %amax_4), kwargs = {})
# %exp_4 : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_4,), kwargs = {})
triton_poi_fused__softmax_add_leaky_relu_mul_where_7 = async_compile.triton('triton_poi_fused__softmax_add_leaky_relu_mul_where_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_add_leaky_relu_mul_where_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + (x2), xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.where(tmp1, tmp4, tmp6)
tmp8 = -8999999815811072.0
tmp9 = tl.where(tmp0, tmp7, tmp8)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
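# exp(x - amax) step for the output head's masked attention logits, mirroring the
# four-head kernel above but for one attention matrix only.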
# kernel path: runs/run_shard_7/inductor_cache/vn/cvnlzux2xi3qwahaidfxslw62nvrvjgyqouwnuxx7bn6vc4gb2jx.py
# Topologically Sorted Source Nodes: [x_3, log_softmax], Original ATen: [aten.elu, aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax_5, sub_5
# x_3 => expm1_4, gt_14, mul_22, mul_24, where_14
# Graph fragment:
# %gt_14 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mm_19, 0), kwargs = {})
# %mul_22 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_19, 1.0), kwargs = {})
# %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_22,), kwargs = {})
# %mul_24 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %mul_22, %mul_24), kwargs = {})
# %amax_5 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where_14, [1], True), kwargs = {})
# %sub_5 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where_14, %amax_5), kwargs = {})
triton_poi_fused__log_softmax_elu_8 = async_compile.triton('triton_poi_fused__log_softmax_elu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_elu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp8 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tmp8 > tmp1
tmp10 = tmp8 * tmp3
tmp11 = libdevice.expm1(tmp10)
tmp12 = tmp11 * tmp3
tmp13 = tl.where(tmp9, tmp10, tmp12)
tmp15 = tmp14 > tmp1
tmp16 = tmp14 * tmp3
tmp17 = libdevice.expm1(tmp16)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp15, tmp16, tmp18)
tmp20 = triton_helpers.maximum(tmp13, tmp19)
tmp22 = tmp21 > tmp1
tmp23 = tmp21 * tmp3
tmp24 = libdevice.expm1(tmp23)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp22, tmp23, tmp25)
tmp27 = triton_helpers.maximum(tmp20, tmp26)
tmp29 = tmp28 > tmp1
tmp30 = tmp28 * tmp3
tmp31 = libdevice.expm1(tmp30)
tmp32 = tmp31 * tmp3
tmp33 = tl.where(tmp29, tmp30, tmp32)
tmp34 = triton_helpers.maximum(tmp27, tmp33)
tmp35 = tmp7 - tmp34
tl.store(out_ptr0 + (x2), tmp35, xmask)
''', device_str='cuda')
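# Applies ELU to the output head's h_prime and subtracts the per-row maximum, i.e. the
# numerically stable first half of F.log_softmax(F.elu(x), dim=1).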
# kernel path: runs/run_shard_7/inductor_cache/uv/cuvhhdiycantl2koymc5p6rharsyggopa6l4kqtmxc7kcvu52m2v.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp_5, log, sub_6, sum_6
# Graph fragment:
# %exp_5 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_5,), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp_5, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_6,), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_5, %log), kwargs = {})
triton_poi_fused__log_softmax_9 = async_compile.triton('triton_poi_fused__log_softmax_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
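# Completes the log_softmax: out = (x - amax) - log(sum(exp(x - amax))) per row, giving
# the final [4, 4] log-probabilities returned by the graph.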
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh1], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 1), (1, 1), 0), out=buf1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh2], Original ATen: [aten.mm]
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 1), (1, 1), 4), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e, e_1], Original ATen: [aten.add, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0.run(buf1, buf2, buf3, 16, grid=grid(16), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [gt], Original ATen: [aten.gt]
triton_poi_fused_gt_1.run(primals_4, buf4, 16, grid=grid(16), stream=stream0)
del primals_4
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh1_1], Original ATen: [aten.mm]
extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1), (1, 1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh2_1], Original ATen: [aten.mm]
extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1), (1, 1), 4), out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_2, e_3], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf10, buf11, buf12, 16, grid=grid(16), stream=stream0)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh1_2], Original ATen: [aten.mm]
extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1), (1, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh2_2], Original ATen: [aten.mm]
extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1), (1, 1), 4), out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_4, e_5], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf18, buf19, buf20, 16, grid=grid(16), stream=stream0)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh1_3], Original ATen: [aten.mm]
extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1), (1, 1), 0), out=buf26)
buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh2_3], Original ATen: [aten.mm]
extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1), (1, 1), 4), out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_6, e_7], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf26, buf27, buf28, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [e, e_1, zero_vec, attention, attention_1, e_2, e_3, attention_3, attention_4, e_4, e_5, attention_6, attention_7, e_6, e_7, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_add_leaky_relu_mul_where_2.run(buf4, buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19, buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, grid=grid(4), stream=stream0)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [e, e_1, zero_vec, attention, attention_1, e_2, e_3, attention_3, attention_4, e_4, e_5, attention_6, attention_7, e_6, e_7, attention_9, attention_10], Original ATen: [aten.add, aten.leaky_relu, aten.mul, aten.where, aten._softmax]
triton_poi_fused__softmax_add_leaky_relu_mul_where_3.run(buf4, buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20, buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14, buf22, buf30, 16, grid=grid(16), stream=stream0)
del buf1
del buf10
del buf11
del buf13
del buf18
del buf19
del buf2
del buf21
del buf26
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 16, grid=grid(16), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [h_prime], Original ATen: [aten.mm]
extern_kernels.mm(buf7, buf0, out=buf8)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf14, buf15, 16, grid=grid(16), stream=stream0)
buf16 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [h_prime_1], Original ATen: [aten.mm]
extern_kernels.mm(buf15, buf9, out=buf16)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_7], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf22, buf23, 16, grid=grid(16), stream=stream0)
buf24 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [h_prime_2], Original ATen: [aten.mm]
extern_kernels.mm(buf23, buf17, out=buf24)
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_10], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf30, buf31, 16, grid=grid(16), stream=stream0)
buf32 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [h_prime_3], Original ATen: [aten.mm]
extern_kernels.mm(buf31, buf25, out=buf32)
buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
triton_poi_fused_cat_5.run(buf8, buf16, buf24, buf32, buf33, 64, grid=grid(64), stream=stream0)
buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [Wh_4], Original ATen: [aten.mm]
extern_kernels.mm(buf33, primals_11, out=buf34)
buf35 = reinterpret_tensor(buf5, (4, 1), (1, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [Wh1_4], Original ATen: [aten.mm]
extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1), (1, 1), 0), out=buf35)
buf36 = reinterpret_tensor(buf29, (4, 1), (1, 1), 0); del buf29 # reuse
# Topologically Sorted Source Nodes: [Wh2_4], Original ATen: [aten.mm]
extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1), (1, 1), 4), out=buf36)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [e_8, e_9], Original ATen: [aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_0.run(buf35, buf36, buf37, 16, grid=grid(16), stream=stream0)
buf38 = reinterpret_tensor(buf27, (4, 1), (1, 4), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [zero_vec, e_8, e_9, attention_12, attention_13], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.where, aten._softmax]
triton_poi_fused__softmax_add_leaky_relu_mul_where_6.run(buf4, buf37, buf35, buf36, buf38, 4, grid=grid(4), stream=stream0)
buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [zero_vec, e_8, e_9, attention_12, attention_13], Original ATen: [aten.mul, aten.add, aten.leaky_relu, aten.where, aten._softmax]
triton_poi_fused__softmax_add_leaky_relu_mul_where_7.run(buf4, buf37, buf35, buf36, buf38, buf39, 16, grid=grid(16), stream=stream0)
del buf35
del buf36
del buf38
buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attention_13], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf39, buf40, 16, grid=grid(16), stream=stream0)
buf41 = buf39; del buf39 # reuse
# Topologically Sorted Source Nodes: [h_prime_4], Original ATen: [aten.mm]
extern_kernels.mm(buf40, buf34, out=buf41)
buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, log_softmax], Original ATen: [aten.elu, aten._log_softmax]
triton_poi_fused__log_softmax_elu_8.run(buf41, buf42, 16, grid=grid(16), stream=stream0)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_9.run(buf42, buf43, 16, grid=grid(16), stream=stream0)
del buf42
return (buf43, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20, buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, buf43, reinterpret_tensor(buf34, (4, 4), (1, 4), 0), reinterpret_tensor(primals_12, (1, 4), (1, 1), 4), reinterpret_tensor(primals_12, (1, 4), (1, 1), 0), reinterpret_tensor(buf33, (16, 4), (1, 16), 0), reinterpret_tensor(primals_11, (4, 16), (1, 4), 0), reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(primals_10, (1, 4), (1, 1), 4), reinterpret_tensor(primals_10, (1, 4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(primals_8, (1, 4), (1, 1), 4), reinterpret_tensor(primals_8, (1, 4), (1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0), reinterpret_tensor(primals_6, (1, 4), (1, 1), 4), reinterpret_tensor(primals_6, (1, 4), (1, 1), 0), reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (1, 4), (1, 1), 4), reinterpret_tensor(primals_3, (1, 4), (1, 1), 0), )
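# call() returns the [4, 4] log-probabilities (buf43) first; the remaining buffers and
# reinterpreted parameter views are intermediates kept alive, presumably for the backward
# half of this '0_forward' AOT graph.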
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((8, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
e = self._prepare_attentional_mechanism_input(Wh)
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def _prepare_attentional_mechanism_input(self, Wh):
Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
e = Wh1 + Wh2.T
return self.leakyrelu(e)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
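# Hedged illustration (not part of the original repo): a minimal eager-mode sketch of the
# score computation the layer above performs, assuming node features Wh of shape
# (N, out_features), attention vector a of shape (2*out_features, 1) and an (N, N)
# adjacency matrix. The helper name below is hypothetical.
def _gat_masked_attention_sketch(Wh, a, adj, negative_slope=0.2):
    out_features = Wh.size(1)
    Wh1 = Wh @ a[:out_features, :]                 # (N, 1): source-node score contribution
    Wh2 = Wh @ a[out_features:, :]                 # (N, 1): target-node score contribution
    e = F.leaky_relu(Wh1 + Wh2.T, negative_slope)  # (N, N): broadcast sum, then LeakyReLU
    e = torch.where(adj > 0, e, torch.full_like(e, -9000000000000000.0))
    return F.softmax(e, dim=1)                     # non-neighbour entries get ~0 weight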
class GAT(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
"""Dense version of GAT."""
super(GAT, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return F.log_softmax(x, dim=1)
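# Hedged usage sketch (comment only, not from the original repo), matching the
# get_inputs()/get_init_inputs() helpers below:
#   model = GAT(nfeat=4, nhid=4, nclass=4, dropout=0.5, alpha=4, nheads=4)
#   x, adj = torch.rand(4, 4), torch.rand(4, 4)
#   log_probs = model(x, adj)   # shape (4, nclass); rows are log-softmax scores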
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5,
'alpha': 4, 'nheads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
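# Descriptive note: despite its name, the kernel above does not apply LeakyReLU itself; it
# only stores the boolean predicate (Wh1_i + Wh2_j) > 0. Later kernels reuse this mask to
# choose between the identity branch and the alpha-scaled branch of the LeakyReLU.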
@triton.jit
def triton_poi_fused_gt_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tl.load(in_ptr3 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp13 = tl.load(in_ptr3 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp22 = tl.load(in_ptr3 + 2)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp31 = tl.load(in_ptr3 + 3)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp38 = tl.load(in_ptr4 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp39 = tl.load(in_ptr5 + x0, xmask)
tmp40 = tl.load(in_ptr6 + 0)
tmp41 = tl.broadcast_to(tmp40, [XBLOCK])
tmp46 = tl.load(in_ptr4 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp47 = tl.load(in_ptr6 + 1)
tmp48 = tl.broadcast_to(tmp47, [XBLOCK])
tmp54 = tl.load(in_ptr4 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp55 = tl.load(in_ptr6 + 2)
tmp56 = tl.broadcast_to(tmp55, [XBLOCK])
tmp62 = tl.load(in_ptr4 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp63 = tl.load(in_ptr6 + 3)
tmp64 = tl.broadcast_to(tmp63, [XBLOCK])
tmp70 = tl.load(in_ptr7 + 4 * x0, xmask, eviction_policy='evict_last').to(
tl.int1)
tmp71 = tl.load(in_ptr8 + x0, xmask)
tmp72 = tl.load(in_ptr9 + 0)
tmp73 = tl.broadcast_to(tmp72, [XBLOCK])
tmp78 = tl.load(in_ptr7 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp79 = tl.load(in_ptr9 + 1)
tmp80 = tl.broadcast_to(tmp79, [XBLOCK])
tmp86 = tl.load(in_ptr7 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp87 = tl.load(in_ptr9 + 2)
tmp88 = tl.broadcast_to(tmp87, [XBLOCK])
tmp94 = tl.load(in_ptr7 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp95 = tl.load(in_ptr9 + 3)
tmp96 = tl.broadcast_to(tmp95, [XBLOCK])
tmp102 = tl.load(in_ptr10 + 4 * x0, xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp103 = tl.load(in_ptr11 + x0, xmask)
tmp104 = tl.load(in_ptr12 + 0)
tmp105 = tl.broadcast_to(tmp104, [XBLOCK])
tmp110 = tl.load(in_ptr10 + (1 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp111 = tl.load(in_ptr12 + 1)
tmp112 = tl.broadcast_to(tmp111, [XBLOCK])
tmp118 = tl.load(in_ptr10 + (2 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp119 = tl.load(in_ptr12 + 2)
tmp120 = tl.broadcast_to(tmp119, [XBLOCK])
tmp126 = tl.load(in_ptr10 + (3 + 4 * x0), xmask, eviction_policy=
'evict_last').to(tl.int1)
tmp127 = tl.load(in_ptr12 + 3)
tmp128 = tl.broadcast_to(tmp127, [XBLOCK])
tmp5 = tmp2 + tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.where(tmp1, tmp5, tmp7)
tmp9 = -8999999815811072.0
tmp10 = tl.where(tmp0, tmp8, tmp9)
tmp15 = tmp2 + tmp14
tmp16 = tmp15 * tmp6
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = tl.where(tmp11, tmp17, tmp9)
tmp19 = triton_helpers.maximum(tmp10, tmp18)
tmp24 = tmp2 + tmp23
tmp25 = tmp24 * tmp6
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp20, tmp26, tmp9)
tmp28 = triton_helpers.maximum(tmp19, tmp27)
tmp33 = tmp2 + tmp32
tmp34 = tmp33 * tmp6
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp35, tmp9)
tmp37 = triton_helpers.maximum(tmp28, tmp36)
tmp42 = tmp39 + tmp41
tmp43 = tmp42 * tmp6
tmp44 = tl.where(tmp38, tmp42, tmp43)
tmp45 = tl.where(tmp0, tmp44, tmp9)
tmp49 = tmp39 + tmp48
tmp50 = tmp49 * tmp6
tmp51 = tl.where(tmp46, tmp49, tmp50)
tmp52 = tl.where(tmp11, tmp51, tmp9)
tmp53 = triton_helpers.maximum(tmp45, tmp52)
tmp57 = tmp39 + tmp56
tmp58 = tmp57 * tmp6
tmp59 = tl.where(tmp54, tmp57, tmp58)
tmp60 = tl.where(tmp20, tmp59, tmp9)
tmp61 = triton_helpers.maximum(tmp53, tmp60)
tmp65 = tmp39 + tmp64
tmp66 = tmp65 * tmp6
tmp67 = tl.where(tmp62, tmp65, tmp66)
tmp68 = tl.where(tmp29, tmp67, tmp9)
tmp69 = triton_helpers.maximum(tmp61, tmp68)
tmp74 = tmp71 + tmp73
tmp75 = tmp74 * tmp6
tmp76 = tl.where(tmp70, tmp74, tmp75)
tmp77 = tl.where(tmp0, tmp76, tmp9)
tmp81 = tmp71 + tmp80
tmp82 = tmp81 * tmp6
tmp83 = tl.where(tmp78, tmp81, tmp82)
tmp84 = tl.where(tmp11, tmp83, tmp9)
tmp85 = triton_helpers.maximum(tmp77, tmp84)
tmp89 = tmp71 + tmp88
tmp90 = tmp89 * tmp6
tmp91 = tl.where(tmp86, tmp89, tmp90)
tmp92 = tl.where(tmp20, tmp91, tmp9)
tmp93 = triton_helpers.maximum(tmp85, tmp92)
tmp97 = tmp71 + tmp96
tmp98 = tmp97 * tmp6
tmp99 = tl.where(tmp94, tmp97, tmp98)
tmp100 = tl.where(tmp29, tmp99, tmp9)
tmp101 = triton_helpers.maximum(tmp93, tmp100)
tmp106 = tmp103 + tmp105
tmp107 = tmp106 * tmp6
tmp108 = tl.where(tmp102, tmp106, tmp107)
tmp109 = tl.where(tmp0, tmp108, tmp9)
tmp113 = tmp103 + tmp112
tmp114 = tmp113 * tmp6
tmp115 = tl.where(tmp110, tmp113, tmp114)
tmp116 = tl.where(tmp11, tmp115, tmp9)
tmp117 = triton_helpers.maximum(tmp109, tmp116)
tmp121 = tmp103 + tmp120
tmp122 = tmp121 * tmp6
tmp123 = tl.where(tmp118, tmp121, tmp122)
tmp124 = tl.where(tmp20, tmp123, tmp9)
tmp125 = triton_helpers.maximum(tmp117, tmp124)
tmp129 = tmp103 + tmp128
tmp130 = tmp129 * tmp6
tmp131 = tl.where(tmp126, tmp129, tmp130)
tmp132 = tl.where(tmp29, tmp131, tmp9)
tmp133 = triton_helpers.maximum(tmp125, tmp132)
tl.store(out_ptr0 + x0, tmp37, xmask)
tl.store(out_ptr1 + x0, tmp69, xmask)
tl.store(out_ptr2 + x0, tmp101, xmask)
tl.store(out_ptr3 + x0, tmp133, xmask)
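# Descriptive note: the kernel above fuses the first softmax pass for all four attention
# heads. Per output row it rebuilds LeakyReLU(Wh1_i + Wh2_j) from the stored masks, replaces
# non-edges (adj <= 0) with -8999999815811072.0 (the float32 rounding of the -9e15 fill value
# in the eager code) and reduces the row maximum, which the next kernel subtracts for
# numerical stability.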
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9,
in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16,
out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, xmask).to(tl.int1)
tmp14 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr7 + x0, xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr9 + x2, xmask).to(tl.int1)
tmp24 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr11 + x0, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr12 + x1, xmask, eviction_policy='evict_last')
tmp33 = tl.load(in_ptr13 + x2, xmask).to(tl.int1)
tmp34 = tl.load(in_ptr14 + x1, xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr15 + x0, xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr16 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.where(tmp1, tmp4, tmp6)
tmp8 = -8999999815811072.0
tmp9 = tl.where(tmp0, tmp7, tmp8)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tmp16 = tmp14 + tmp15
tmp17 = tmp16 * tmp5
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tl.where(tmp0, tmp18, tmp8)
tmp21 = tmp19 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp26 = tmp24 + tmp25
tmp27 = tmp26 * tmp5
tmp28 = tl.where(tmp23, tmp26, tmp27)
tmp29 = tl.where(tmp0, tmp28, tmp8)
tmp31 = tmp29 - tmp30
tmp32 = tl_math.exp(tmp31)
tmp36 = tmp34 + tmp35
tmp37 = tmp36 * tmp5
tmp38 = tl.where(tmp33, tmp36, tmp37)
tmp39 = tl.where(tmp0, tmp38, tmp8)
tmp41 = tmp39 - tmp40
tmp42 = tl_math.exp(tmp41)
tl.store(out_ptr0 + x2, tmp12, xmask)
tl.store(out_ptr1 + x2, tmp22, xmask)
tl.store(out_ptr2 + x2, tmp32, xmask)
tl.store(out_ptr3 + x2, tmp42, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
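# Descriptive note: together with the exp kernel above, this implements the standard
# two-pass softmax. A hedged eager equivalent for one head:
#   e_exp = torch.exp(masked_scores - masked_scores.max(dim=1, keepdim=True).values)
#   attention = e_exp / e_exp.sum(dim=1, keepdim=True)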
@triton.jit
def triton_poi_fused_cat_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 1.0
tmp9 = tmp5 * tmp8
tmp10 = libdevice.expm1(tmp9)
tmp11 = tmp10 * tmp8
tmp12 = tl.where(tmp7, tmp9, tmp11)
tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype)
tmp14 = tl.where(tmp4, tmp12, tmp13)
tmp15 = tmp0 >= tmp3
tmp16 = tl.full([1], 8, tl.int64)
tmp17 = tmp0 < tmp16
tmp18 = tmp15 & tmp17
tmp19 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp18 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 > tmp6
tmp21 = tmp19 * tmp8
tmp22 = libdevice.expm1(tmp21)
tmp23 = tmp22 * tmp8
tmp24 = tl.where(tmp20, tmp21, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp18, tmp24, tmp25)
tmp27 = tmp0 >= tmp16
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tmp27 & tmp29
tmp31 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp30 & xmask,
eviction_policy='evict_last', other=0.0)
tmp32 = tmp31 > tmp6
tmp33 = tmp31 * tmp8
tmp34 = libdevice.expm1(tmp33)
tmp35 = tmp34 * tmp8
tmp36 = tl.where(tmp32, tmp33, tmp35)
tmp37 = tl.full(tmp36.shape, 0.0, tmp36.dtype)
tmp38 = tl.where(tmp30, tmp36, tmp37)
tmp39 = tmp0 >= tmp28
tl.full([1], 16, tl.int64)
tmp42 = tl.load(in_ptr3 + (4 * x1 + (-12 + x0)), tmp39 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp42 > tmp6
tmp44 = tmp42 * tmp8
tmp45 = libdevice.expm1(tmp44)
tmp46 = tmp45 * tmp8
tmp47 = tl.where(tmp43, tmp44, tmp46)
tmp48 = tl.full(tmp47.shape, 0.0, tmp47.dtype)
tmp49 = tl.where(tmp39, tmp47, tmp48)
tmp50 = tl.where(tmp30, tmp38, tmp49)
tmp51 = tl.where(tmp18, tmp26, tmp50)
tmp52 = tl.where(tmp4, tmp14, tmp51)
tl.store(out_ptr0 + x2, tmp52, xmask)
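# Descriptive note: the kernel above applies ELU (x if x > 0 else expm1(x)) to each head's
# h_prime and writes the four results side by side, i.e. it fuses
# torch.cat([F.elu(h_prime_k) for k in range(4)], dim=1) into a single pass.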
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_6(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last').to(tl
.int1)
tmp2 = tl.load(in_ptr2 + x0, xmask)
tmp3 = tl.load(in_ptr3 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp11 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp12 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp13 = tl.load(in_ptr3 + 1)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK])
tmp20 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp21 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp22 = tl.load(in_ptr3 + 2)
tmp23 = tl.broadcast_to(tmp22, [XBLOCK])
tmp29 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp30 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
).to(tl.int1)
tmp31 = tl.load(in_ptr3 + 3)
tmp32 = tl.broadcast_to(tmp31, [XBLOCK])
tmp5 = tmp2 + tmp4
tmp6 = 4.0
tmp7 = tmp5 * tmp6
tmp8 = tl.where(tmp1, tmp5, tmp7)
tmp9 = -8999999815811072.0
tmp10 = tl.where(tmp0, tmp8, tmp9)
tmp15 = tmp2 + tmp14
tmp16 = tmp15 * tmp6
tmp17 = tl.where(tmp12, tmp15, tmp16)
tmp18 = tl.where(tmp11, tmp17, tmp9)
tmp19 = triton_helpers.maximum(tmp10, tmp18)
tmp24 = tmp2 + tmp23
tmp25 = tmp24 * tmp6
tmp26 = tl.where(tmp21, tmp24, tmp25)
tmp27 = tl.where(tmp20, tmp26, tmp9)
tmp28 = triton_helpers.maximum(tmp19, tmp27)
tmp33 = tmp2 + tmp32
tmp34 = tmp33 * tmp6
tmp35 = tl.where(tmp30, tmp33, tmp34)
tmp36 = tl.where(tmp29, tmp35, tmp9)
tmp37 = triton_helpers.maximum(tmp28, tmp36)
tl.store(out_ptr0 + x0, tmp37, xmask)
@triton.jit
def triton_poi_fused__softmax_add_leaky_relu_mul_where_7(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_ptr1 + x2, xmask).to(tl.int1)
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = 4.0
tmp6 = tmp4 * tmp5
tmp7 = tl.where(tmp1, tmp4, tmp6)
tmp8 = -8999999815811072.0
tmp9 = tl.where(tmp0, tmp7, tmp8)
tmp11 = tmp9 - tmp10
tmp12 = tl_math.exp(tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused__log_softmax_elu_8(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp8 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tmp8 > tmp1
tmp10 = tmp8 * tmp3
tmp11 = libdevice.expm1(tmp10)
tmp12 = tmp11 * tmp3
tmp13 = tl.where(tmp9, tmp10, tmp12)
tmp15 = tmp14 > tmp1
tmp16 = tmp14 * tmp3
tmp17 = libdevice.expm1(tmp16)
tmp18 = tmp17 * tmp3
tmp19 = tl.where(tmp15, tmp16, tmp18)
tmp20 = triton_helpers.maximum(tmp13, tmp19)
tmp22 = tmp21 > tmp1
tmp23 = tmp21 * tmp3
tmp24 = libdevice.expm1(tmp23)
tmp25 = tmp24 * tmp3
tmp26 = tl.where(tmp22, tmp23, tmp25)
tmp27 = triton_helpers.maximum(tmp20, tmp26)
tmp29 = tmp28 > tmp1
tmp30 = tmp28 * tmp3
tmp31 = libdevice.expm1(tmp30)
tmp32 = tmp31 * tmp3
tmp33 = tl.where(tmp29, tmp30, tmp32)
tmp34 = triton_helpers.maximum(tmp27, tmp33)
tmp35 = tmp7 - tmp34
tl.store(out_ptr0 + x2, tmp35, xmask)
@triton.jit
def triton_poi_fused__log_softmax_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
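# Descriptive note: the two kernels above form a numerically stable log-softmax of the
# ELU-activated output-head features: the first subtracts the row maximum of elu(x), the
# second subtracts log(sum(exp(.))) over the row. A hedged eager equivalent:
#   out = F.log_softmax(F.elu(x_out), dim=1)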
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (8, 1), (1, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (8, 1), (1, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (8, 1), (1, 1))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (8, 1), (1, 1))
assert_size_stride(primals_11, (16, 4), (4, 1))
assert_size_stride(primals_12, (8, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 1), (1, 1
), 0), out=buf1)
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 1), (1, 1
), 4), out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf1, buf2, buf3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_gt_1[grid(16)](primals_4, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_5, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1), (1, 1
), 0), out=buf10)
buf11 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf9, reinterpret_tensor(primals_6, (4, 1), (1, 1
), 4), out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf10, buf11, buf12, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_7, out=buf17)
del primals_7
buf18 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1), (1,
1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_8, (4, 1), (1,
1), 4), out=buf19)
buf20 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf18, buf19, buf20, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_9, out=buf25)
del primals_9
buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1), (1,
1), 0), out=buf26)
buf27 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf25, reinterpret_tensor(primals_10, (4, 1), (1,
1), 4), out=buf27)
buf28 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf26, buf27, buf28, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf13 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf21 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf29 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused__softmax_add_leaky_relu_mul_where_2[grid(4)](buf4,
buf3, buf1, buf2, buf12, buf10, buf11, buf20, buf18, buf19,
buf28, buf26, buf27, buf5, buf13, buf21, buf29, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf22 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf30 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_add_leaky_relu_mul_where_3[grid(16)](buf4,
buf3, buf1, buf2, buf5, buf12, buf10, buf11, buf13, buf20,
buf18, buf19, buf21, buf28, buf26, buf27, buf29, buf6, buf14,
buf22, buf30, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf1
del buf10
del buf11
del buf13
del buf18
del buf19
del buf2
del buf21
del buf26
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf6, buf7, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = buf6
del buf6
extern_kernels.mm(buf7, buf0, out=buf8)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf14, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = buf14
del buf14
extern_kernels.mm(buf15, buf9, out=buf16)
buf23 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf22, buf23, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf24 = buf22
del buf22
extern_kernels.mm(buf23, buf17, out=buf24)
buf31 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf30, buf31, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf32 = buf30
del buf30
extern_kernels.mm(buf31, buf25, out=buf32)
buf33 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
triton_poi_fused_cat_5[grid(64)](buf8, buf16, buf24, buf32, buf33,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf34 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf33, primals_11, out=buf34)
buf35 = reinterpret_tensor(buf5, (4, 1), (1, 1), 0)
del buf5
extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1), (1,
1), 0), out=buf35)
buf36 = reinterpret_tensor(buf29, (4, 1), (1, 1), 0)
del buf29
extern_kernels.mm(buf34, reinterpret_tensor(primals_12, (4, 1), (1,
1), 4), out=buf36)
buf37 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_add_leaky_relu_0[grid(16)](buf35, buf36, buf37, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf38 = reinterpret_tensor(buf27, (4, 1), (1, 4), 0)
del buf27
triton_poi_fused__softmax_add_leaky_relu_mul_where_6[grid(4)](buf4,
buf37, buf35, buf36, buf38, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf39 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_add_leaky_relu_mul_where_7[grid(16)](buf4,
buf37, buf35, buf36, buf38, buf39, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf35
del buf36
del buf38
buf40 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(16)](buf39, buf40, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf41 = buf39
del buf39
extern_kernels.mm(buf40, buf34, out=buf41)
buf42 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_elu_8[grid(16)](buf41, buf42, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf43 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__log_softmax_9[grid(16)](buf42, buf43, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf42
return (buf43, buf3, buf4, buf7, buf8, buf12, buf15, buf16, buf20,
buf23, buf24, buf28, buf31, buf32, buf37, buf40, buf41, buf43,
reinterpret_tensor(buf34, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_12, (1, 4), (1, 1), 4), reinterpret_tensor(primals_12, (1,
4), (1, 1), 0), reinterpret_tensor(buf33, (16, 4), (1, 16), 0),
reinterpret_tensor(primals_11, (4, 16), (1, 4), 0),
reinterpret_tensor(buf25, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_10, (1, 4), (1, 1), 4), reinterpret_tensor(primals_10, (1,
4), (1, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0),
reinterpret_tensor(buf17, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_8, (1, 4), (1, 1), 4), reinterpret_tensor(primals_8, (1, 4),
(1, 1), 0), reinterpret_tensor(buf9, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_6, (1, 4), (1, 1), 4),
reinterpret_tensor(primals_6, (1, 4), (1, 1), 0),
reinterpret_tensor(buf0, (4, 4), (1, 4), 0), reinterpret_tensor(
primals_3, (1, 4), (1, 1), 4), reinterpret_tensor(primals_3, (1, 4),
(1, 1), 0))
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
e = self._prepare_attentional_mechanism_input(Wh)
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def _prepare_attentional_mechanism_input(self, Wh):
Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
e = Wh1 + Wh2.T
return self.leakyrelu(e)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class GATNew(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
"""Dense version of GAT."""
super(GATNew, self).__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, input_0, input_1):
        # Map the eager inputs/parameters onto the primals order used by call():
        # primals_1 = x, primals_4 = adj, and each head contributes its (W, a) pair.
        primals_1 = input_0
        primals_2 = self.attention_0.W
        primals_3 = self.attention_0.a
        primals_4 = input_1
        primals_5 = self.attention_1.W
        primals_6 = self.attention_1.a
        primals_7 = self.attention_2.W
        primals_8 = self.attention_2.a
        primals_9 = self.attention_3.W
        primals_10 = self.attention_3.a
        primals_11 = self.out_att.W
        primals_12 = self.out_att.a
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
| XiangwenNing/pyGAT | GAT | false | 2980 | [
"MIT"
] | 0 | c4bd8e2be044c6c7481d484875b3c318271cca9c | https://github.com/XiangwenNing/pyGAT/tree/c4bd8e2be044c6c7481d484875b3c318271cca9c | import torch
import torch.nn as nn
import torch.nn.functional as F
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super().__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.empty(size=(2 * out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, h, adj):
Wh = torch.mm(h, self.W)
e = self._prepare_attentional_mechanism_input(Wh)
zero_vec = -9000000000000000.0 * torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, Wh)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def _prepare_attentional_mechanism_input(self, Wh):
Wh1 = torch.matmul(Wh, self.a[:self.out_features, :])
Wh2 = torch.matmul(Wh, self.a[self.out_features:, :])
e = Wh1 + Wh2.T
return self.leakyrelu(e)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
class Model(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads):
"""Dense version of GAT."""
super().__init__()
self.dropout = dropout
self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout,
alpha=alpha, concat=True) for _ in range(nheads)]
for i, attention in enumerate(self.attentions):
self.add_module('attention_{}'.format(i), attention)
self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=
dropout, alpha=alpha, concat=False)
def forward(self, x, adj):
x = F.dropout(x, self.dropout, training=self.training)
x = torch.cat([att(x, adj) for att in self.attentions], dim=1)
x = F.dropout(x, self.dropout, training=self.training)
x = F.elu(self.out_att(x, adj))
return F.log_softmax(x, dim=1)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'nfeat': 4, 'nhid': 4, 'nclass': 4, 'dropout': 0.5,
'alpha': 4, 'nheads': 4}]
|
GraphLevelAttentionLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/et/cetlocvlmaoxaj6afumfhalegfvgarx6k3pkajf2n7xt7gaq7uwu.py
# Topologically Sorted Source Nodes: [repeat, add, h_prime], Original ATen: [aten.repeat, aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# h_prime => tanh
# repeat => repeat
# Graph fragment:
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_3, [4, 1]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, %repeat), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_repeat_tanh_0 = async_compile.triton('triton_poi_fused_add_repeat_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_repeat_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yn/cynjrtcbvvngz6apfumshziobdiji5wn46qzzyqagqifi5yu2zwu.py
# Topologically Sorted Source Nodes: [semantic_attentions_1, semantic_attentions_2], Original ATen: [aten.mean, aten._softmax]
# Source node to ATen node mapping:
# semantic_attentions_1 => mean
# semantic_attentions_2 => amax, div, exp, sub, sum_1
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [1], True), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mean, [0], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [0], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_mean_1 = async_compile.triton('triton_per_fused__softmax_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 2],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_mean_1(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 2
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (2*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*r0)), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 / tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = triton_helpers.max2(tmp5, 1)[:, None]
tmp8 = tmp4 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = tmp9 / tmp12
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tn/ctnlap35mqlgzpamfq2vy76nqnjxkge3sqjgil2a4jlghhnvm7iy.py
# Topologically Sorted Source Nodes: [semantic_attentions_4, h_embedding, sum_1, h_embedding_1], Original ATen: [aten.repeat, aten.mul, aten.sum, aten.squeeze]
# Source node to ATen node mapping:
# h_embedding => mul
# h_embedding_1 => squeeze
# semantic_attentions_4 => repeat_1
# sum_1 => sum_2
# Graph fragment:
# %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%view_1, [1, 2, 4]), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %repeat_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [0]), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%sum_2,), kwargs = {})
triton_poi_fused_mul_repeat_squeeze_sum_2 = async_compile.triton('triton_poi_fused_mul_repeat_squeeze_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_repeat_squeeze_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_repeat_squeeze_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr1 + (1))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [repeat, add, h_prime], Original ATen: [aten.repeat, aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_repeat_tanh_0.run(buf1, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm]
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
buf5 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [semantic_attentions_1, semantic_attentions_2], Original ATen: [aten.mean, aten._softmax]
triton_per_fused__softmax_mean_1.run(buf2, buf5, 1, 2, grid=grid(1), stream=stream0)
del buf2
buf6 = empty_strided_cuda((2, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [semantic_attentions_4, h_embedding, sum_1, h_embedding_1], Original ATen: [aten.repeat, aten.mul, aten.sum, aten.squeeze]
triton_poi_fused_mul_repeat_squeeze_sum_2.run(primals_2, buf5, buf6, 8, grid=grid(8), stream=stream0)
return (buf6, buf5, primals_2, buf1, buf5, primals_4, )
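# Note on the return value (descriptive comment): buf6 is the (2, 4) fused graph-level
# embedding, buf5 holds the two softmax attention coefficients, and the remaining entries
# (primals_2, buf1, buf5, primals_4) are tensors kept for the backward pass of this
# forward graph.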
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class GraphLevelAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features):
super(GraphLevelAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.my_coefs = None
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.b = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_uniform_(self.b.data, gain=1.414)
self.q = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_uniform_(self.q.data, gain=1.414)
self.Tanh = nn.Tanh()
self.leakyrelu = nn.LeakyReLU()
def forward(self, total_embeds, P=2):
h = torch.mm(total_embeds, self.W)
h_prime = self.Tanh(h + self.b.repeat(h.size()[0], 1))
semantic_attentions = torch.mm(h_prime, torch.t(self.q)).view(P, -1)
N = semantic_attentions.size()[1]
semantic_attentions = semantic_attentions.mean(dim=1, keepdim=True)
semantic_attentions = F.softmax(semantic_attentions, dim=0)
self.my_coefs = semantic_attentions
semantic_attentions = semantic_attentions.view(P, 1, 1)
semantic_attentions = semantic_attentions.repeat(1, N, self.in_features
)
input_embedding = total_embeds.view(P, N, self.in_features)
h_embedding = torch.mul(input_embedding, semantic_attentions)
h_embedding = torch.sum(h_embedding, dim=0).squeeze()
return h_embedding
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
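# Hedged illustration (not part of the original repo): a minimal eager sketch of the
# semantic attention performed above, assuming total_embeds stacks P view-specific
# embeddings of shape (N, F) along dim 0. The helper name below is hypothetical.
def _semantic_attention_sketch(total_embeds, W, b, q, P=2):
    h_prime = torch.tanh(total_embeds @ W + b)                         # (P*N, F')
    scores = (h_prime @ q.t()).view(P, -1).mean(dim=1, keepdim=True)   # one score per view
    coefs = F.softmax(scores, dim=0)                                   # (P, 1) weights over views
    views = total_embeds.view(P, -1, total_embeds.size(1))             # (P, N, F)
    return (coefs.view(P, 1, 1) * views).sum(dim=0)                    # (N, F) weighted combination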
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_repeat_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_per_fused__softmax_mean_1(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 2
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 2 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * r0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 2.0
tmp4 = tmp2 / tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = triton_helpers.max2(tmp5, 1)[:, None]
tmp8 = tmp4 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.sum(tmp10, 1)[:, None]
tmp13 = tmp9 / tmp12
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp13, None)
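# Descriptive note: the kernel above fuses mean and softmax over the P=2 semantic views: it
# averages the two projected scores of each view, then normalises the two means with a
# max-subtracted softmax, producing the (2, 1) attention coefficients.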
@triton.jit
def triton_poi_fused_mul_repeat_squeeze_sum_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp5 = tl.load(in_ptr1 + 1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp3 = tmp0 * tmp2
tmp7 = tmp4 * tmp6
tmp8 = tmp3 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
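# Descriptive note: the kernel above applies the two softmax coefficients to the two stacked
# views of the input embeddings and sums them, i.e. it computes
# coefs[0] * total_embeds[:2] + coefs[1] * total_embeds[2:] in one fused pass.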
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_repeat_tanh_0[grid(16)](buf1, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (4, 1), (1, 4
), 0), out=buf2)
buf5 = empty_strided_cuda((2, 1), (1, 1), torch.float32)
triton_per_fused__softmax_mean_1[grid(1)](buf2, buf5, 1, 2, XBLOCK=
1, num_warps=2, num_stages=1)
del buf2
buf6 = empty_strided_cuda((2, 4), (4, 1), torch.float32)
triton_poi_fused_mul_repeat_squeeze_sum_2[grid(8)](primals_2, buf5,
buf6, 8, XBLOCK=8, num_warps=1, num_stages=1)
return buf6, buf5, primals_2, buf1, buf5, primals_4
class GraphLevelAttentionLayerNew(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features):
super(GraphLevelAttentionLayerNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.my_coefs = None
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.b = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_uniform_(self.b.data, gain=1.414)
self.q = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_uniform_(self.q.data, gain=1.414)
self.Tanh = nn.Tanh()
self.leakyrelu = nn.LeakyReLU()
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0):
primals_1 = self.W
primals_3 = self.b
primals_4 = self.q
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Yonggie/HsCTRD | GraphLevelAttentionLayer | false | 2981 | [
"Apache-2.0"
] | 0 | d4541f13630a6abd0e17b116ad6aeeab74f54f1c | https://github.com/Yonggie/HsCTRD/tree/d4541f13630a6abd0e17b116ad6aeeab74f54f1c | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.my_coefs = None
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.b = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_uniform_(self.b.data, gain=1.414)
self.q = nn.Parameter(torch.zeros(size=(1, out_features)))
nn.init.xavier_uniform_(self.q.data, gain=1.414)
self.Tanh = nn.Tanh()
self.leakyrelu = nn.LeakyReLU()
def forward(self, total_embeds, P=2):
h = torch.mm(total_embeds, self.W)
h_prime = self.Tanh(h + self.b.repeat(h.size()[0], 1))
semantic_attentions = torch.mm(h_prime, torch.t(self.q)).view(P, -1)
N = semantic_attentions.size()[1]
semantic_attentions = semantic_attentions.mean(dim=1, keepdim=True)
semantic_attentions = F.softmax(semantic_attentions, dim=0)
self.my_coefs = semantic_attentions
semantic_attentions = semantic_attentions.view(P, 1, 1)
semantic_attentions = semantic_attentions.repeat(1, N, self.in_features
)
input_embedding = total_embeds.view(P, N, self.in_features)
h_embedding = torch.mul(input_embedding, semantic_attentions)
h_embedding = torch.sum(h_embedding, dim=0).squeeze()
return h_embedding
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
UpsampleBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ko/ckoyg3wc5qizxkiab7mlwvvzc5aw7hphof45bkxr5zq27h74ryn4.py
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_6 => convolution_1
# x_7 => gt, mul, where
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_4, %primals_5, [1], [12], [1], False, [0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution_1, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr1 + (x3), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kh/ckhhky3clmsuriq2rzr7ybgt25y63c26q3wxei5xrryybvs5ubhe.py
# Topologically Sorted Source Nodes: [shortcut, x_8, x_9, add], Original ATen: [aten.convolution, aten.leaky_relu, aten.add]
# Source node to ATen node mapping:
# add => add
# shortcut => convolution
# x_8 => convolution_2
# x_9 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view_1, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_6, %primals_7, [1], [12], [1], False, [0], 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_2, %mul_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%where_1, %convolution), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr0 + (x3), xmask)
tmp9 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 + tmp10
tl.store(out_ptr0 + (x3), tmp4, xmask)
tl.store(in_out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 1, 25), (25, 25, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 25), (100, 25, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [shortcut], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_4, stride=(1,), padding=(12,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6, x_7], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_5, buf2, buf3, 64, grid=grid(64), stream=stream0)
del buf1
del primals_5
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1,), padding=(12,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf6 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [shortcut, x_8, x_9, add], Original ATen: [aten.convolution, aten.leaky_relu, aten.add]
triton_poi_fused_add_convolution_leaky_relu_1.run(buf6, buf4, primals_7, primals_3, buf5, 64, grid=grid(64), stream=stream0)
del buf4
del primals_3
del primals_7
return (buf6, primals_2, primals_4, primals_6, reinterpret_tensor(primals_1, (4, 1, 4), (4, 1, 1), 0), buf2, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1, 25), (25, 25, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 25), (100, 25, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PixelShuffle1d(nn.Module):
def __init__(self, upscale_factor):
super().__init__()
self.upscale_factor = upscale_factor
def forward(self, x):
batch_size = x.shape[0]
short_channel_len = x.shape[1]
short_width = x.shape[2]
long_channel_len = short_channel_len // self.upscale_factor
long_width = self.upscale_factor * short_width
x = x.contiguous().view([batch_size, self.upscale_factor,
long_channel_len, short_width])
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(batch_size, long_channel_len, long_width)
return x
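# Shape sketch for PixelShuffle1d (illustrative, not part of the original module):
# channels are folded into the length axis by the upscale factor.
# >>> ps = PixelShuffle1d(upscale_factor=4)
# >>> ps(torch.rand(2, 8, 5)).shape # 8 // 4 channels, 4 * 5 samples
# torch.Size([2, 2, 20])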
class SamePaddingConv1d(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super().__init__()
self.conv = nn.Conv1d(in_dim, out_dim, kernel_size, padding=int((
kernel_size - 1) / 2))
def forward(self, x):
return self.conv(x)
class UpConv1d(nn.Module):
def __init__(self, in_dim, out_dim, scale_factor, kernel_size):
super().__init__()
self.pixel_shuffer = PixelShuffle1d(scale_factor)
self.conv = SamePaddingConv1d(in_dim // scale_factor, out_dim,
kernel_size)
def forward(self, x):
x = self.pixel_shuffer(x)
return self.conv(x)
class UpsampleBlock(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.deconv = UpConv1d(in_dim, out_dim, scale_factor=4, kernel_size=1)
self.conv_1 = UpConv1d(in_dim, out_dim, scale_factor=4, kernel_size=25)
self.conv_2 = nn.Conv1d(out_dim, out_dim, 25, padding=12)
self.LReLU_1 = nn.LeakyReLU(0.2)
self.LReLU_2 = nn.LeakyReLU(0.2)
def forward(self, input):
shortcut = self.deconv(input)
x = input
x = self.conv_1(x)
x = self.LReLU_1(x)
x = self.conv_2(x)
x = self.LReLU_2(x)
return x + shortcut
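# Rough data flow for UpsampleBlock.forward, assuming a (batch, in_dim, width) input
# with in_dim divisible by 4:
#   shortcut: pixel-shuffle x4 -> 1-tap conv                     -> (batch, out_dim, 4*width)
#   main:     pixel-shuffle x4 -> 25-tap conv -> LeakyReLU(0.2)
#             -> 25-tap conv -> LeakyReLU(0.2)                   -> (batch, out_dim, 4*width)
# Both kernel sizes are odd and "same"-padded, so the two branches can be summed.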
def get_inputs():
return [torch.rand([4, 4, 1, 1])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr1 + x3, tmp7, xmask)
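# The kernel above fuses the Conv1d bias add with LeakyReLU(0.2): per element it
# computes y = conv_out + bias, writes where(y > 0, y, 0.2 * y) to out_ptr1, and
# stores the boolean pre-activation mask (y > 0) to out_ptr0.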
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_out_ptr0 + x3, xmask)
tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp10 = tmp8 + tmp9
tmp11 = tmp7 + tmp10
tl.store(out_ptr0 + x3, tmp4, xmask)
tl.store(in_out_ptr0 + x3, tmp11, xmask)
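# The kernel above finishes the block in one pass: it applies bias + LeakyReLU(0.2)
# to the main-branch conv output (in_ptr0/in_ptr1), adds the shortcut conv output
# plus its bias (in_out_ptr0/in_ptr2), and stores the residual sum in place in
# in_out_ptr0; the (y > 0) mask of the main branch goes to out_ptr0.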
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 1, 25), (25, 25, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 25), (100, 25, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4), (4, 4, 1), 0), primals_2, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (4, 4, 4), (16, 4, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4,
1, 4), (4, 4, 1), 0), primals_4, stride=(1,), padding=(12,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(64)](buf1, primals_5,
buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1,),
padding=(12,), dilation=(1,), transposed=False, output_padding=
(0,), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 4), (16, 4, 1))
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf6 = buf0
del buf0
triton_poi_fused_add_convolution_leaky_relu_1[grid(64)](buf6, buf4,
primals_7, primals_3, buf5, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf4
del primals_3
del primals_7
return buf6, primals_2, primals_4, primals_6, reinterpret_tensor(primals_1,
(4, 1, 4), (4, 1, 1), 0), buf2, buf3, buf5
class PixelShuffle1d(nn.Module):
def __init__(self, upscale_factor):
super().__init__()
self.upscale_factor = upscale_factor
def forward(self, x):
batch_size = x.shape[0]
short_channel_len = x.shape[1]
short_width = x.shape[2]
long_channel_len = short_channel_len // self.upscale_factor
long_width = self.upscale_factor * short_width
x = x.contiguous().view([batch_size, self.upscale_factor,
long_channel_len, short_width])
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(batch_size, long_channel_len, long_width)
return x
class SamePaddingConv1d(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super().__init__()
self.conv = nn.Conv1d(in_dim, out_dim, kernel_size, padding=int((
kernel_size - 1) / 2))
def forward(self, x):
return self.conv(x)
class UpConv1d(nn.Module):
def __init__(self, in_dim, out_dim, scale_factor, kernel_size):
super().__init__()
self.pixel_shuffer = PixelShuffle1d(scale_factor)
self.conv = SamePaddingConv1d(in_dim // scale_factor, out_dim,
kernel_size)
def forward(self, x):
x = self.pixel_shuffer(x)
return self.conv(x)
class UpsampleBlockNew(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.deconv = UpConv1d(in_dim, out_dim, scale_factor=4, kernel_size=1)
self.conv_1 = UpConv1d(in_dim, out_dim, scale_factor=4, kernel_size=25)
self.conv_2 = nn.Conv1d(out_dim, out_dim, 25, padding=12)
self.LReLU_1 = nn.LeakyReLU(0.2)
self.LReLU_2 = nn.LeakyReLU(0.2)
def forward(self, input_0):
primals_2 = self.deconv.conv.conv.weight
primals_3 = self.deconv.conv.conv.bias
primals_4 = self.conv_1.conv.conv.weight
primals_5 = self.conv_1.conv.conv.bias
primals_6 = self.conv_2.weight
primals_7 = self.conv_2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| Yotsuyubi/drumgan | UpsampleBlock | false | 2,982 | [
"MIT"
] | 0 | eb6a9aa8b5c0d64bad65e4dbd14d444b7a859a29 | https://github.com/Yotsuyubi/drumgan/tree/eb6a9aa8b5c0d64bad65e4dbd14d444b7a859a29 | import torch
import torch.nn as nn
class PixelShuffle1d(nn.Module):
def __init__(self, upscale_factor):
super().__init__()
self.upscale_factor = upscale_factor
def forward(self, x):
batch_size = x.shape[0]
short_channel_len = x.shape[1]
short_width = x.shape[2]
long_channel_len = short_channel_len // self.upscale_factor
long_width = self.upscale_factor * short_width
x = x.contiguous().view([batch_size, self.upscale_factor,
long_channel_len, short_width])
x = x.permute(0, 2, 3, 1).contiguous()
x = x.view(batch_size, long_channel_len, long_width)
return x
class SamePaddingConv1d(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super().__init__()
self.conv = nn.Conv1d(in_dim, out_dim, kernel_size, padding=int((
kernel_size - 1) / 2))
def forward(self, x):
return self.conv(x)
class UpConv1d(nn.Module):
def __init__(self, in_dim, out_dim, scale_factor, kernel_size):
super().__init__()
self.pixel_shuffer = PixelShuffle1d(scale_factor)
self.conv = SamePaddingConv1d(in_dim // scale_factor, out_dim,
kernel_size)
def forward(self, x):
x = self.pixel_shuffer(x)
return self.conv(x)
class Model(nn.Module):
def __init__(self, in_dim, out_dim):
super().__init__()
self.deconv = UpConv1d(in_dim, out_dim, scale_factor=4, kernel_size=1)
self.conv_1 = UpConv1d(in_dim, out_dim, scale_factor=4, kernel_size=25)
self.conv_2 = nn.Conv1d(out_dim, out_dim, 25, padding=12)
self.LReLU_1 = nn.LeakyReLU(0.2)
self.LReLU_2 = nn.LeakyReLU(0.2)
def forward(self, input):
shortcut = self.deconv(input)
x = input
x = self.conv_1(x)
x = self.LReLU_1(x)
x = self.conv_2(x)
x = self.LReLU_2(x)
return x + shortcut
def get_inputs():
return [torch.rand([4, 4, 1, 1])]
def get_init_inputs():
return [4, 4]
|
Generator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nr/cnrkptzsuv7qm3ss6i6xgoxkou23z76h2vmwqkwz2zkgpdbxhedc.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_1, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/32/c32vfxouqe74ea5scuzrdhpd7r6adxwu4bzarm4icjfnb47jbizg.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Generator(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, hidden_size, vocab_size):
super(Generator, self).__init__()
self.proj = nn.Linear(hidden_size, vocab_size, bias=False)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
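# Minimal usage sketch (illustrative): each output row holds log-probabilities over
# the vocabulary, so exponentiating a row sums to 1.
# >>> gen = Generator(hidden_size=4, vocab_size=4)
# >>> gen(torch.rand(2, 3, 4)).exp().sum(-1) # ~ torch.ones(2, 3)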
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'vocab_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
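# Taken together, the two kernels above form a numerically stable log-softmax over
# the last (size-4) dim: kernel 0 writes z - max(z), kernel 1 then writes
# (z - max(z)) - log(sum(exp(z - max(z)))), i.e. F.log_softmax(z, dim=-1).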
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2
class GeneratorNew(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, hidden_size, vocab_size):
super(GeneratorNew, self).__init__()
self.proj = nn.Linear(hidden_size, vocab_size, bias=False)
def forward(self, input_0):
primals_1 = self.proj.weight
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| Yixuan-Lee/yixuan-lee.github.io | Generator | false | 2,983 | [
"MIT"
] | 0 | 139dd141544302ca1802a6104f7db7aeb1ace825 | https://github.com/Yixuan-Lee/yixuan-lee.github.io/tree/139dd141544302ca1802a6104f7db7aeb1ace825 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
"""Define standard linear + softmax generation step."""
def __init__(self, hidden_size, vocab_size):
super().__init__()
self.proj = nn.Linear(hidden_size, vocab_size, bias=False)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
SamePaddingConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6r/c6r2hfbul2m3b4uoif2puxqk3lufnoiljsdrzkfbped7lvfnb36w.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1], [1], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 3)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 12, grid=grid(12), stream=stream0)
del primals_2
return (reinterpret_tensor(buf1, (4, 3), (3, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SamePaddingConv1d(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super().__init__()
self.conv = nn.Conv1d(in_dim, out_dim, kernel_size, padding=int((
kernel_size - 1) / 2))
def forward(self, x):
return self.conv(x)
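# Note: padding = (kernel_size - 1) // 2 only preserves the length for odd kernel
# sizes. With the even kernel_size=4 used below, the output is one sample shorter
# (L_out = L_in + 2*1 - 4 + 1 = L_in - 1), which is why the compiled graph above
# asserts a (1, 4, 3) result for a length-4 input.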
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_dim': 4, 'out_dim': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 3
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
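# The kernel above only folds the per-channel Conv1d bias (bias[x1]) into the result
# of extern_kernels.convolution, updating the 12 output elements in place.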
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(1,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 3), (12, 3, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(12)](buf1, primals_2, 12,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_2
return reinterpret_tensor(buf1, (4, 3), (3, 1), 0
), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0)
class SamePaddingConv1dNew(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super().__init__()
self.conv = nn.Conv1d(in_dim, out_dim, kernel_size, padding=int((
kernel_size - 1) / 2))
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| Yotsuyubi/drumgan | SamePaddingConv1d | false | 2,984 | [
"MIT"
] | 0 | eb6a9aa8b5c0d64bad65e4dbd14d444b7a859a29 | https://github.com/Yotsuyubi/drumgan/tree/eb6a9aa8b5c0d64bad65e4dbd14d444b7a859a29 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super().__init__()
self.conv = nn.Conv1d(in_dim, out_dim, kernel_size, padding=int((
kernel_size - 1) / 2))
def forward(self, x):
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
MultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5t/c5txhhxqwgxdtwmgseoxqhkhd47b4q5u6chxi7shnrpkkyfwlubq.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2j/c2jp3ao3qc7s2piwrwrsa4nfhuwr2y6p5vmtnr3avjozont334gm.py
# Topologically Sorted Source Nodes: [scores, max_1], Original ATen: [aten.div, aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# scores => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%div,), kwargs = {})
triton_per_fused_div_max_1 = async_compile.triton('triton_per_fused_div_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_max_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_max_1(in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sh/cshxyr7ubygdgkvu2jcbl36m76i56v5sozjjpqddp4enliefpxwc.py
# Topologically Sorted Source Nodes: [scores, sub, scores_1], Original ATen: [aten.div, aten.sub, aten._softmax]
# Source node to ATen node mapping:
# scores => div
# scores_1 => amax, exp, sub_1, sum_1
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_sub_2 = async_compile.triton('triton_poi_fused__softmax_div_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp7 - tmp4
tmp9 = triton_helpers.maximum(tmp5, tmp8)
tmp11 = tmp10 * tmp1
tmp12 = tmp11 - tmp4
tmp13 = triton_helpers.maximum(tmp9, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp15 - tmp4
tmp17 = triton_helpers.maximum(tmp13, tmp16)
tmp18 = tmp5 - tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp8 - tmp17
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp17
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp17
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tl.store(out_ptr0 + (x0), tmp17, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ab/cabhniu4tyer6exl4prbgpjbgmqgwyhnmgevuz4airuaxunzvkui.py
# Topologically Sorted Source Nodes: [scores, sub, scores_1, isnan, scores_2], Original ATen: [aten.div, aten.sub, aten._softmax, aten.isnan, aten.masked_fill]
# Source node to ATen node mapping:
# isnan => isnan
# scores => div
# scores_1 => amax, div_1, exp, sub_1, sum_1
# scores_2 => full_default, where
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_1,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default, %div_1), kwargs = {})
triton_poi_fused__softmax_div_isnan_masked_fill_sub_3 = async_compile.triton('triton_poi_fused__softmax_div_isnan_masked_fill_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_isnan_masked_fill_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_isnan_masked_fill_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = libdevice.isnan(tmp10).to(tl.int1)
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp10)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [scores, max_1], Original ATen: [aten.div, aten.max]
triton_per_fused_div_max_1.run(buf5, buf6, 1, 1024, grid=grid(1), stream=stream0)
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32)
# Topologically Sorted Source Nodes: [scores, sub, scores_1], Original ATen: [aten.div, aten.sub, aten._softmax]
triton_poi_fused__softmax_div_sub_2.run(buf5, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores, sub, scores_1, isnan, scores_2], Original ATen: [aten.div, aten.sub, aten._softmax, aten.isnan, aten.masked_fill]
triton_poi_fused__softmax_div_isnan_masked_fill_sub_3.run(buf5, buf6, buf7, buf8, buf9, 1024, grid=grid(1024), stream=stream0)
del buf7
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_8, buf10, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_8
buf11 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (64, 4, 1), (4, 1, 0), 0), out=buf11)
return (reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 1, 4), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf9, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It has projection layers for computing the keys, queries and values, followed by scaled dot-product attention.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
def head_split(self, x):
new_x_shape = x.size()[:-1] + (self.h, self.d_k)
return x.view(*new_x_shape).transpose(-2, -3)
def forward(self, q, k, v, mask=None):
origin_shape = q.size()
if not self.kq_same:
q = self.head_split(self.q_linear(q))
else:
q = self.head_split(self.k_linear(q))
k = self.head_split(self.k_linear(k))
v = self.head_split(self.v_linear(v))
output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)
output = output.transpose(-2, -3).reshape(origin_shape)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, d_k, mask=None):
"""
Called by the multi-head attention object to compute the attention-weighted values.
"""
scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
if mask is not None:
scores = scores.masked_fill(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
scores = scores.masked_fill(torch.isnan(scores), 0)
output = torch.matmul(scores, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_heads': 4}]
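# Illustrative usage sketch (added for readability; not part of the original module). It assumes
# the toy configuration returned by get_init_inputs(), i.e. d_model=4 and n_heads=4, and only
# checks that attention preserves the query shape.
def _example_attention_usage():
    attn = MultiHeadAttention(d_model=4, n_heads=4)
    q, k, v = get_inputs()
    out = attn(q, k, v)
    assert out.shape == q.shape  # (4, 4, 4, 4): the output keeps the query layout
    return out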
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused_div_max_1(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused__softmax_div_sub_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp7 - tmp4
tmp9 = triton_helpers.maximum(tmp5, tmp8)
tmp11 = tmp10 * tmp1
tmp12 = tmp11 - tmp4
tmp13 = triton_helpers.maximum(tmp9, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp15 - tmp4
tmp17 = triton_helpers.maximum(tmp13, tmp16)
tmp18 = tmp5 - tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp8 - tmp17
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp17
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp17
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tl.store(out_ptr0 + x0, tmp17, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused__softmax_div_isnan_masked_fill_sub_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = libdevice.isnan(tmp10).to(tl.int1)
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp10)
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_9, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf0, primals_3, buf3, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_5, buf4, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_div_max_1[grid(1)](buf5, buf6, 1, 1024, num_warps=
8, num_stages=1)
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
triton_poi_fused__softmax_div_sub_2[grid(256)](buf5, buf6, buf7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_div_isnan_masked_fill_sub_3[grid(1024)](buf5,
buf6, buf7, buf8, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1
)
del buf7
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(64, 4)](buf2, primals_8, buf10, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf11 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (64, 4, 1), (4, 1, 0), 0), out=buf11)
return reinterpret_tensor(buf11, (4, 4, 4, 4), (64, 16, 1, 4), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_9, (64, 4), (4, 1), 0
), buf5, buf6, reinterpret_tensor(buf9, (64, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf10, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0
), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0)
class MultiHeadAttentionNew(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It has projection layers for computing the keys, queries and values, followed by scaled dot-product attention.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
def head_split(self, x):
new_x_shape = x.size()[:-1] + (self.h, self.d_k)
return x.view(*new_x_shape).transpose(-2, -3)
@staticmethod
def scaled_dot_product_attention(q, k, v, d_k, mask=None):
"""
Called by the multi-head attention object to compute the attention-weighted values.
"""
scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
if mask is not None:
scores = scores.masked_fill(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
scores = scores.masked_fill(torch.isnan(scores), 0)
output = torch.matmul(scores, v)
return output
def forward(self, input_0, input_1, input_2):
primals_2 = self.q_linear.weight
primals_3 = self.q_linear.bias
primals_4 = self.k_linear.weight
primals_5 = self.k_linear.bias
primals_7 = self.v_linear.weight
primals_8 = self.v_linear.bias
primals_1 = input_0
primals_6 = input_1
primals_9 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| Yingting-dev/ReChorus | MultiHeadAttention | false | 2,985 | [
"MIT"
] | 0 | a16bc1e42f3e90e889133d7476c52ada44db573b | https://github.com/Yingting-dev/ReChorus/tree/a16bc1e42f3e90e889133d7476c52ada44db573b | import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class Model(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It has projection layers for computing the keys, queries and values, followed by scaled dot-product attention.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
def head_split(self, x):
new_x_shape = x.size()[:-1] + (self.h, self.d_k)
return x.view(*new_x_shape).transpose(-2, -3)
def forward(self, q, k, v, mask=None):
origin_shape = q.size()
if not self.kq_same:
q = self.head_split(self.q_linear(q))
else:
q = self.head_split(self.k_linear(q))
k = self.head_split(self.k_linear(k))
v = self.head_split(self.v_linear(v))
output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)
output = output.transpose(-2, -3).reshape(origin_shape)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, d_k, mask=None):
"""
Called by the multi-head attention object to compute the attention-weighted values.
"""
scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
if mask is not None:
scores = scores.masked_fill(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
scores = scores.masked_fill(torch.isnan(scores), 0)
output = torch.matmul(scores, v)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
LNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wq/cwq4v2t2casrvnd4kuih7ltbgx5737ucaocshzecbzd5g64pkqnq.py
# Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# lnn_out => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl_math.abs(tmp0)
tmp2 = 1e-07
tmp3 = tmp1 + tmp2
tmp4 = libdevice.log1p(tmp3)
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bm/cbmeb2qgbivgi5ugwn3lxt7jt7xsbz2udthbkyvylpkqrid3yapc.py
# Topologically Sorted Source Nodes: [lnn_out, lnn_exp, relu], Original ATen: [aten.clone, aten.expm1, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# lnn_exp => expm1
# lnn_out => clone_1
# relu => relu
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%clone_1,), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%expm1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_clone_expm1_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_clone_expm1_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_expm1_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = libdevice.expm1(tmp0)
tmp2 = tl.full([1, 1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp4 = 0.0
tmp5 = tmp3 <= tmp4
tl.store(out_ptr0 + (x2 + (4*y3)), tmp3, xmask & ymask)
tl.store(out_ptr1 + (x2 + (4*y3)), tmp5, xmask & ymask)
''', device_str='cuda')
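# Illustrative eager-mode reference (an assumption for readability; Inductor does not emit this):
# the fused kernel above evaluates relu(expm1(x)) elementwise and also stores the boolean mask
# consumed by threshold_backward for the ReLU gradient.
def _reference_expm1_relu(x):
    out = torch.relu(torch.expm1(x))
    return out, out <= 0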
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lnn_out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [lnn_out, lnn_exp, relu], Original ATen: [aten.clone, aten.expm1, aten.relu, aten.threshold_backward]
triton_poi_fused_clone_expm1_relu_threshold_backward_1.run(buf1, buf2, buf3, 64, 4, grid=grid(64, 4), stream=stream0)
return (reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), buf1, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn.functional as F
import torch.utils.data
class LNN(torch.nn.Module):
"""
A PyTorch implementation of the LNN layer.
Input shape
- A 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.
Output shape
- A 2D tensor with shape: ``(batch_size, LNN_dim * embedding_size)``.
Arguments
- **num_fields**: int. The number of feature fields.
- **embed_dim**: int. The embedding size of each feature.
- **LNN_dim**: int. The number of logarithmic neurons.
- **bias**: bool. Whether to use a bias term in the LNN.
"""
def __init__(self, num_fields, embed_dim, LNN_dim, bias=False):
super(LNN, self).__init__()
self.num_fields = num_fields
self.embed_dim = embed_dim
self.LNN_dim = LNN_dim
self.lnn_output_dim = LNN_dim * embed_dim
self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embedding_size)``
"""
embed_x_abs = torch.abs(x)
embed_x_afn = torch.add(embed_x_abs, 1e-07)
embed_x_log = torch.log1p(embed_x_afn)
lnn_out = torch.matmul(self.weight, embed_x_log)
if self.bias is not None:
lnn_out += self.bias
lnn_exp = torch.expm1(lnn_out)
output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_fields': 4, 'embed_dim': 4, 'LNN_dim': 4}]
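# Minimal usage sketch (added for illustration; not part of the original file). It relies only on
# the toy sizes from get_init_inputs() and the helper get_inputs() defined above: the layer
# computes relu(expm1(weight @ log1p(|x| + 1e-07))) and flattens the result to
# LNN_dim * embed_dim features per row.
def _example_lnn_usage():
    lnn = LNN(num_fields=4, embed_dim=4, LNN_dim=4)
    x, = get_inputs()
    out = lnn(x)
    assert out.shape[-1] == lnn.lnn_output_dim  # 4 * 4 = 16 output features per row
    return out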
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl_math.abs(tmp0)
tmp2 = 1e-07
tmp3 = tmp1 + tmp2
tmp4 = libdevice.log1p(tmp3)
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_expm1_relu_threshold_backward_1(in_ptr0,
out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = libdevice.expm1(tmp0)
tmp2 = tl.full([1, 1], 0, tl.int32)
tmp3 = triton_helpers.maximum(tmp2, tmp1)
tmp4 = 0.0
tmp5 = tmp3 <= tmp4
tl.store(out_ptr0 + (x2 + 4 * y3), tmp3, xmask & ymask)
tl.store(out_ptr1 + (x2 + 4 * y3), tmp5, xmask & ymask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_clone_expm1_relu_threshold_backward_1[grid(64, 4)](
buf1, buf2, buf3, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4,
num_stages=1)
return reinterpret_tensor(buf2, (16, 16), (16, 1), 0), reinterpret_tensor(
buf0, (64, 4), (4, 1), 0), buf1, buf3
class LNNNew(torch.nn.Module):
"""
A PyTorch implementation of the LNN layer.
Input shape
- A 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.
Output shape
- A 2D tensor with shape: ``(batch_size, LNN_dim * embedding_size)``.
Arguments
- **num_fields**: int. The number of feature fields.
- **embed_dim**: int. The embedding size of each feature.
- **LNN_dim**: int. The number of logarithmic neurons.
- **bias**: bool. Whether to use a bias term in the LNN.
"""
def __init__(self, num_fields, embed_dim, LNN_dim, bias=False):
super(LNNNew, self).__init__()
self.num_fields = num_fields
self.embed_dim = embed_dim
self.LNN_dim = LNN_dim
self.lnn_output_dim = LNN_dim * embed_dim
self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| ZEKAICHEN/RecSys | LNN | false | 2,986 | [
"MIT"
] | 0 | 7ab66b4a6cee620cc4baeb00f916ff329834f903 | https://github.com/ZEKAICHEN/RecSys/tree/7ab66b4a6cee620cc4baeb00f916ff329834f903 | import math
import torch
import torch.nn.functional as F
import torch.utils.data
class Model(torch.nn.Module):
"""
A PyTorch implementation of the LNN layer.
Input shape
- A 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.
Output shape
- A 2D tensor with shape: ``(batch_size, LNN_dim * embedding_size)``.
Arguments
- **num_fields**: int. The number of feature fields.
- **embed_dim**: int. The embedding size of each feature.
- **LNN_dim**: int. The number of logarithmic neurons.
- **bias**: bool. Whether to use a bias term in the LNN.
"""
def __init__(self, num_fields, embed_dim, LNN_dim, bias=False):
super().__init__()
self.num_fields = num_fields
self.embed_dim = embed_dim
self.LNN_dim = LNN_dim
self.lnn_output_dim = LNN_dim * embed_dim
self.weight = torch.nn.Parameter(torch.Tensor(LNN_dim, num_fields))
if bias:
self.bias = torch.nn.Parameter(torch.Tensor(LNN_dim, embed_dim))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, x):
"""
:param x: Float tensor of size ``(batch_size, num_fields, embedding_size)``
"""
embed_x_abs = torch.abs(x)
embed_x_afn = torch.add(embed_x_abs, 1e-07)
embed_x_log = torch.log1p(embed_x_afn)
lnn_out = torch.matmul(self.weight, embed_x_log)
if self.bias is not None:
lnn_out += self.bias
lnn_exp = torch.expm1(lnn_out)
output = F.relu(lnn_exp).contiguous().view(-1, self.lnn_output_dim)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
FirstStage | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5x/c5x3n4l2c3tmjgjbrmfnuutejnjsf2nzbpdcgmmbfnonuxxf2t6c.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4l/c4lnr6hlwauhxvzhbbzd3hw5zcgz4bbbiyvmij3cpth576dsbz7i.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_2 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x3 = (xindex // 32)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-65) + (2*x0) + (128*x3)), tmp10, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-64) + (2*x0) + (128*x3)), tmp16, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-63) + (2*x0) + (128*x3)), tmp23, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (128*x3)), tmp30, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (128*x3)), tmp33, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x3)), tmp36, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (63 + (2*x0) + (128*x3)), tmp43, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x3)), tmp46, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x3)), tmp49, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x4), tmp51, None)
tl.store(out_ptr1 + (x4), tmp76, None)
''', device_str='cuda')
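# Illustrative equivalence note (an assumption for readability; Inductor does not emit this):
# the kernel above performs a 3x3, stride-2, padding-1 max pool over the 64x64 ReLU output.
# out_ptr0 receives the pooled values and out_ptr1 the int8 offset of the winning element
# within each 3x3 window. A plain PyTorch reference for the pooled values only:
def _reference_max_pool_values(x):
    return torch.nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1)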
# kernel path: runs/run_shard_7/inductor_cache/s2/cs2pbgtdmxsduxykrhuugmksh3tij756npwvjdpkmdrn2f3bew65.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_4 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5p/c5ptpdqovkjru6r326ys3aseqlhocbsz65vqkcf53j6orm2bw4tw.py
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_5 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x3 = (xindex // 16)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-33) + (2*x0) + (64*x3)), tmp10, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-32) + (2*x0) + (64*x3)), tmp16, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-31) + (2*x0) + (64*x3)), tmp23, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (64*x3)), tmp30, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (64*x3)), tmp33, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x3)), tmp36, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + (2*x0) + (64*x3)), tmp43, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x3)), tmp46, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x3)), tmp49, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x4), tmp51, None)
tl.store(out_ptr1 + (x4), tmp76, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zr/czr445wr45alwcn7z6gnpovyy5wicbszl4buf42klqvbebzp6p2s.py
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_7 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_relu_4 = async_compile.triton('triton_poi_fused_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xy/cxycl6zut2ibasirywoizo3zl6z7hegqqbqc5qwzu77hwdfj2ovg.py
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_8 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 8) % 8
x0 = xindex % 8
x3 = (xindex // 8)
x4 = xindex
tmp0 = (-1) + (2*x1)
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = (-1) + (2*x0)
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((-17) + (2*x0) + (32*x3)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 2*x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + ((-16) + (2*x0) + (32*x3)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + ((-15) + (2*x0) + (32*x3)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2*x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + ((-1) + (2*x0) + (32*x3)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + ((2*x0) + (32*x3)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x3)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (15 + (2*x0) + (32*x3)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x3)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x3)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + (x4), tmp51, xmask)
tl.store(out_ptr1 + (x4), tmp76, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wa/cwab23gvlutosfmrcjw3unnvfkajepxmfbymx2yxqhqyal7wuj43.py
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_10 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_4, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_5, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_6, (16, 1, 9, 9), (81, 81, 9, 1))
assert_size_stride(primals_7, (16, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_8, (4, 16, 1, 1), (16, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 65536, grid=grid(65536), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 32, 32), (4096, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf5, 16384, grid=grid(16384), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 16, 16), (1024, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf9, 4096, grid=grid(4096), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 1024, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_5, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 8, 8), (64, 64, 8, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_11], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 8, 8), (1024, 64, 8, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [out_12], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf15, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 16, 8, 8), (1024, 64, 8, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.relu]
triton_poi_fused_relu_4.run(buf17, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [out_15], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 8, 8), (256, 64, 8, 1))
return (buf18, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf15, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 9, 9), (324, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 9, 9), (324, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 5, 5), (100, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 1, 9, 9), (81, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16, 1, 1), (16, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False)
def conv5x5(in_planes, out_planes, stride=1):
"""5x5 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False)
def conv9x9(in_planes, out_planes, stride=1):
"""9x9 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=9, stride=stride,
padding=4, bias=False)
def maxpool3x3(stride=2):
"""3x3 maxpooling with padding"""
return nn.MaxPool2d(kernel_size=3, stride=stride, padding=1)
class FirstStage(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, out_channels, stride=1):
super(FirstStage, self).__init__()
self.out_channels = out_channels
self.conv1 = conv9x9(3, planes)
self.pool1 = maxpool3x3()
self.conv2 = conv9x9(planes, planes)
self.pool2 = maxpool3x3()
self.conv3 = conv9x9(planes, planes)
self.pool3 = maxpool3x3()
self.conv4 = conv5x5(planes, planes // self.expansion)
self.conv5 = conv9x9(planes // self.expansion, planes * self.expansion)
self.conv6 = conv1x1(planes * self.expansion, planes * self.expansion)
self.conv7 = conv1x1(planes * self.expansion, self.out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.relu(out)
out = self.pool1(out)
out = self.conv2(out)
out = self.relu(out)
out = self.pool2(out)
out = self.conv3(out)
out = self.relu(out)
out = self.pool3(out)
out = self.conv4(out)
out = self.relu(out)
out = self.conv5(out)
out = self.relu(out)
out = self.conv6(out)
out = self.relu(out)
out = self.conv7(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4, 'out_channels': 4}]
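# A minimal usage sketch (an illustrative addition, not part of the original source):
# builds the module with the arguments from get_init_inputs() and feeds it the tensor
# from get_inputs(); runs on CPU as written.
if __name__ == "__main__":
    model = FirstStage(inplanes=4, planes=4, out_channels=4)
    out = model(*get_inputs())
    print(out.shape)  # torch.Size([4, 4, 8, 8]) after three stride-2 max pools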
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x3 = xindex // 32
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-65 + 2 * x0 + 128 * x3), tmp10,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-64 + 2 * x0 + 128 * x3), tmp16,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-63 + 2 * x0 + 128 * x3), tmp23,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 128 * x3), tmp30,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 128 * x3), tmp33, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x3), tmp36,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (63 + 2 * x0 + 128 * x3), tmp43,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x3), tmp46,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x3), tmp49,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x4, tmp51, None)
tl.store(out_ptr1 + x4, tmp76, None)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x3 = xindex // 16
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-33 + 2 * x0 + 64 * x3), tmp10,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-32 + 2 * x0 + 64 * x3), tmp16,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-31 + 2 * x0 + 64 * x3), tmp23,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 64 * x3), tmp30,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 64 * x3), tmp33, eviction_policy=
'evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x3), tmp36,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (31 + 2 * x0 + 64 * x3), tmp43,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x3), tmp46,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x3), tmp49,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x4, tmp51, None)
tl.store(out_ptr1 + x4, tmp76, None)
@triton.jit
def triton_poi_fused_relu_4(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, None)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 8 % 8
x0 = xindex % 8
x3 = xindex // 8
x4 = xindex
tmp0 = -1 + 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 16, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = -1 + 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (-17 + 2 * x0 + 32 * x3), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (-16 + 2 * x0 + 32 * x3), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (-15 + 2 * x0 + 32 * x3), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (-1 + 2 * x0 + 32 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (2 * x0 + 32 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 1 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (15 + 2 * x0 + 32 * x3), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x3), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tmp52 = tmp17 > tmp11
tmp53 = tl.full([1], 1, tl.int8)
tmp54 = tl.full([1], 0, tl.int8)
tmp55 = tl.where(tmp52, tmp53, tmp54)
tmp56 = tmp24 > tmp18
tmp57 = tl.full([1], 2, tl.int8)
tmp58 = tl.where(tmp56, tmp57, tmp55)
tmp59 = tmp31 > tmp25
tmp60 = tl.full([1], 3, tl.int8)
tmp61 = tl.where(tmp59, tmp60, tmp58)
tmp62 = tmp34 > tmp32
tmp63 = tl.full([1], 4, tl.int8)
tmp64 = tl.where(tmp62, tmp63, tmp61)
tmp65 = tmp37 > tmp35
tmp66 = tl.full([1], 5, tl.int8)
tmp67 = tl.where(tmp65, tmp66, tmp64)
tmp68 = tmp44 > tmp38
tmp69 = tl.full([1], 6, tl.int8)
tmp70 = tl.where(tmp68, tmp69, tmp67)
tmp71 = tmp47 > tmp45
tmp72 = tl.full([1], 7, tl.int8)
tmp73 = tl.where(tmp71, tmp72, tmp70)
tmp74 = tmp50 > tmp48
tmp75 = tl.full([1], 8, tl.int8)
tmp76 = tl.where(tmp74, tmp75, tmp73)
tl.store(out_ptr0 + x4, tmp51, xmask)
tl.store(out_ptr1 + x4, tmp76, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_4, (4, 4, 9, 9), (324, 81, 9, 1))
assert_size_stride(primals_5, (1, 4, 5, 5), (100, 25, 5, 1))
assert_size_stride(primals_6, (16, 1, 9, 9), (81, 81, 9, 1))
assert_size_stride(primals_7, (16, 16, 1, 1), (16, 1, 1, 1))
assert_size_stride(primals_8, (4, 16, 1, 1), (16, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(65536)](buf1, 65536, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(16384)](buf1, buf2,
buf3, 16384, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 32, 32), (4096, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_relu_2[grid(16384)](buf5, 16384, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.float32)
buf7 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(4096)](buf5, buf6,
buf7, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_4, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 16, 16), (1024, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_relu_4[grid(4096)](buf9, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32
)
buf11 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(1024)](buf9, buf10,
buf11, 1024, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_5, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 8, 8), (64, 64, 8, 1))
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(256)](buf13, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_6, stride=(1, 1),
padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 16, 8, 8), (1024, 64, 8, 1))
buf15 = buf14
del buf14
triton_poi_fused_relu_4[grid(4096)](buf15, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf15, primals_7, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 16, 8, 8), (1024, 64, 8, 1))
buf17 = buf16
del buf16
triton_poi_fused_relu_4[grid(4096)](buf17, 4096, XBLOCK=256,
num_warps=4, num_stages=1)
buf18 = extern_kernels.convolution(buf17, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 8, 8), (256, 64, 8, 1))
return (buf18, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, buf1, buf2, buf3, buf5, buf6, buf7,
buf9, buf10, buf11, buf13, buf15, buf17)
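# The call graph above mirrors the FirstStage forward pass: every nn.Conv2d is dispatched
# to extern_kernels.convolution, while the elementwise ReLUs and the three 3x3/stride-2
# max-pool steps run as the fused Triton kernels defined above. The spatial size drops
# 64 -> 32 -> 16 -> 8 across the pools, giving the final (4, 4, 8, 8) output in buf18.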
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False)
def conv5x5(in_planes, out_planes, stride=1):
"""5x5 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False)
def conv9x9(in_planes, out_planes, stride=1):
"""9x9 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=9, stride=stride,
padding=4, bias=False)
def maxpool3x3(stride=2):
"""3x3 maxpooling with padding"""
return nn.MaxPool2d(kernel_size=3, stride=stride, padding=1)
class FirstStageNew(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, out_channels, stride=1):
super(FirstStageNew, self).__init__()
self.out_channels = out_channels
self.conv1 = conv9x9(3, planes)
self.pool1 = maxpool3x3()
self.conv2 = conv9x9(planes, planes)
self.pool2 = maxpool3x3()
self.conv3 = conv9x9(planes, planes)
self.pool3 = maxpool3x3()
self.conv4 = conv5x5(planes, planes // self.expansion)
self.conv5 = conv9x9(planes // self.expansion, planes * self.expansion)
self.conv6 = conv1x1(planes * self.expansion, planes * self.expansion)
self.conv7 = conv1x1(planes * self.expansion, self.out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_7 = self.conv6.weight
primals_8 = self.conv7.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| YibinXie/Pose_Estimation | FirstStage | false | 2,987 | ["MIT"] | 0 | 5849140bf842bf3aeaad75827f5e7b7f2999c9ee | https://github.com/YibinXie/Pose_Estimation/tree/5849140bf842bf3aeaad75827f5e7b7f2999c9ee | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False)
def conv5x5(in_planes, out_planes, stride=1):
"""5x5 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,
padding=2, bias=False)
def conv9x9(in_planes, out_planes, stride=1):
"""9x9 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=9, stride=stride,
padding=4, bias=False)
def maxpool3x3(stride=2):
"""3x3 maxpooling with padding"""
return nn.MaxPool2d(kernel_size=3, stride=stride, padding=1)
class Model(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, out_channels, stride=1):
super().__init__()
self.out_channels = out_channels
self.conv1 = conv9x9(3, planes)
self.pool1 = maxpool3x3()
self.conv2 = conv9x9(planes, planes)
self.pool2 = maxpool3x3()
self.conv3 = conv9x9(planes, planes)
self.pool3 = maxpool3x3()
self.conv4 = conv5x5(planes, planes // self.expansion)
self.conv5 = conv9x9(planes // self.expansion, planes * self.expansion)
self.conv6 = conv1x1(planes * self.expansion, planes * self.expansion)
self.conv7 = conv1x1(planes * self.expansion, self.out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.relu(out)
out = self.pool1(out)
out = self.conv2(out)
out = self.relu(out)
out = self.pool2(out)
out = self.conv3(out)
out = self.relu(out)
out = self.pool3(out)
out = self.conv4(out)
out = self.relu(out)
out = self.conv5(out)
out = self.relu(out)
out = self.conv6(out)
out = self.relu(out)
out = self.conv7(out)
return out
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [4, 4, 4]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jv/cjvfpvazszqsn7k2c7ac25njk43pn5fjlaxzgkwwsgomov2lqu5x.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
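# The fused kernel below applies the bias add and ReLU in place and additionally stores
# the boolean mask relu(x) <= 0, which aten.threshold_backward later uses to zero the
# gradients of the non-positive activations.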
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7t/c7tl4sumjtjry7tjtgnqmbovvksgqubsdowfuhrkz4ymnneywfuc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ns/cnszijuiz432ctw37rqktvk3syr2vugzeuatmva3neoizic6f3sq.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_5,), kwargs = {})
triton_poi_fused_tanh_2 = async_compile.triton('triton_poi_fused_tanh_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (48, 24), (24, 1))
assert_size_stride(primals_5, (48, ), (1, ))
assert_size_stride(primals_6, (4, 48), (48, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf7, 1536, grid=grid(1536), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(primals_4, (24, 48), (1, 24), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 48), (768, 192, 48, 1), 0); del buf2 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf6, 3072, grid=grid(3072), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 48), (48, 1), 0), reinterpret_tensor(primals_6, (48, 4), (1, 48), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(buf3, (64, 48), (48, 1), 0), buf5, primals_6, buf6, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((48, 24), (24, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 48), (48, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
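# Worked example (a descriptive comment added for clarity): for nn.Linear the weight has
# shape (out_features, in_features), so layer.weight.data.size()[0] is out_features. With
# fc1_units=24 the fc1 limit is 1/sqrt(24) ≈ 0.204, and with fc2_units=48 the fc2 limit
# is 1/sqrt(48) ≈ 0.144.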
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=24,
fc2_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return F.tanh(self.fc3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
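# A minimal usage sketch (an illustrative addition, not part of the original source):
# the final tanh keeps every action component in (-1, 1).
if __name__ == "__main__":
    actor = Actor(state_size=4, action_size=4, seed=4)
    actions = actor(torch.rand(5, 4))  # a batch of 5 four-dimensional states
    print(actions.shape)               # torch.Size([5, 4])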
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_tanh_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (48, 24), (24, 1))
assert_size_stride(primals_5, (48,), (1,))
assert_size_stride(primals_6, (4, 48), (48, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 24), (24, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 24), (384, 96, 24, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 4, 24), (384, 96, 24, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(1536)](buf1,
primals_2, buf7, 1536, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 48), (48, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 24), (24, 1), 0),
reinterpret_tensor(primals_4, (24, 48), (1, 24), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 48), (768, 192, 48, 1), 0)
del buf2
buf6 = empty_strided_cuda((4, 4, 4, 48), (768, 192, 48, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(3072)](buf3,
primals_5, buf6, 3072, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 48), (48, 1), 0),
reinterpret_tensor(primals_6, (48, 4), (1, 48), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_tanh_2[grid(256)](buf5, primals_7, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_7
return buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 24), (24, 1), 0), reinterpret_tensor(
buf3, (64, 48), (48, 1), 0), buf5, primals_6, buf6, primals_4, buf7
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class ActorNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=24,
fc2_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(ActorNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| YufengJin/deep-reinforcement-learning | Actor | false | 2,988 | ["MIT"] | 0 | 141cf00f169b46aa492c9e7520429bfdaab0117d | https://github.com/YufengJin/deep-reinforcement-learning/tree/141cf00f169b46aa492c9e7520429bfdaab0117d | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc1_units=24,
fc2_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc1_units)
self.fc2 = nn.Linear(fc1_units, fc2_units)
self.fc3 = nn.Linear(fc2_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
x = F.relu(self.fc2(x))
return F.tanh(self.fc3(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [res, res_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# res => convolution
# res_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gz/cgz3i5sfiy2r2zcw7on5c3vgziypvqy3tqtrdk4orqs3phgqxlob.py
# Topologically Sorted Source Nodes: [res_2, x], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# res_2 => convolution_1
# x => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_out_ptr0 + (x3), xmask)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [res], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [res, res_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [res_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [res_2, x], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf3, primals_3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch
import torch.nn as nn
class ResBlock(nn.Module):
def __init__(self, inFe):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(inFe, inFe, 3, 1, 1)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(inFe, inFe, 3, 1, 1)
def forward(self, x):
res = self.conv1(x)
res = self.relu(res)
res = self.conv2(res)
x = x + res
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inFe': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
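# Fused epilogue of conv1: adds the per-channel bias and applies ReLU in place
# on the convolution output (in_out_ptr0); x1 indexes the channel dimension.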
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
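# Fused epilogue of conv2: adds the per-channel bias to the conv output held in
# in_out_ptr0, then adds the residual input (in_ptr0), all in place.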
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_out_ptr0 + x3, xmask)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
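# Entry point mirroring ResBlock.forward: conv1 via the extern convolution
# kernel, fused bias+ReLU, conv2, then fused bias+residual addition.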
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_1[grid(256)](buf3, primals_3,
primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1
class ResBlockNew(nn.Module):
def __init__(self, inFe):
super(ResBlockNew, self).__init__()
self.conv1 = nn.Conv2d(inFe, inFe, 3, 1, 1)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(inFe, inFe, 3, 1, 1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ZhibingLai/MSFN | ResBlock | false | 2,989 | [
"Apache-2.0"
] | 0 | eb650c351edf27270bc32b50b60842a9fe40308e | https://github.com/ZhibingLai/MSFN/tree/eb650c351edf27270bc32b50b60842a9fe40308e | import torch
import torch.utils.data
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, inFe):
super().__init__()
self.conv1 = nn.Conv2d(inFe, inFe, 3, 1, 1)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(inFe, inFe, 3, 1, 1)
def forward(self, x):
res = self.conv1(x)
res = self.relu(res)
res = self.conv2(res)
x = x + res
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
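# A minimal usage sketch (not part of the original repo): both 3x3 convs use
# stride=1 and padding=1, so the residual block preserves the input shape.
if __name__ == "__main__":
    block = Model(4)
    out = block(torch.rand(4, 4, 8, 8))
    assert out.shape == (4, 4, 8, 8)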
|
AvgReducePool1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jn/cjnv5uptstyk4xaisuiw5kf5lbz3m33meejxhbfbsta5ozps7ijn.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [2]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class AvgReducePool1d(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
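# A minimal usage sketch (not part of the original module): reducing dim=2 of a
# (4, 4, 4, 4) input yields a (4, 4, 4) tensor, i.e. the pooled dimension is
# removed entirely rather than shortened.
if __name__ == "__main__":
    pool = AvgReducePool1d()
    assert pool(torch.rand(4, 4, 4, 4)).shape == (4, 4, 4)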
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
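# Averages the four elements along dim=2 of the (4, 4, 4, 4) input, producing a
# (4, 4, 4) output; equivalent to torch.mean(input, dim=2) for this shape.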
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AvgReducePool1dNew(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ZhitingHu/texar-pytorch | AvgReducePool1d | false | 2,990 | [
"Apache-2.0"
] | 0 | 72ea115013ced8a5a2b004eacf6271184d3572a8 | https://github.com/ZhitingHu/texar-pytorch/tree/72ea115013ced8a5a2b004eacf6271184d3572a8 | import torch
from torch import nn
class Model(nn.Module):
"""A subclass of :torch_nn:`Module`.
Avg Pool layer for 1D inputs. The same as :torch_nn:`AvgPool1d` except that
the pooling dimension is entirely reduced (i.e., `pool_size=input_length`).
"""
def forward(self, input: 'torch.Tensor') ->torch.Tensor:
return torch.mean(input, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
FeatureAssembler | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cz/ccz75kr63vqusrmgpms64x6b3esgmjev377qdx5r2v35pm2wuvrn.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%expand, %expand_1, %arg2_1, %arg3_1], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = (xindex // 64)
x3 = (xindex // 16)
x4 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x2) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + ((4*x2) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + ((4*x3) + ((-8) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + ((4*x3) + ((-12) + x0)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x4), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, arg2_1, arg3_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
import torch.nn as nn
class FeatureAssembler(nn.Module):
def __init__(self, T: 'int', embed_static: 'Optional[FeatureEmbedder]'=
None, embed_dynamic: 'Optional[FeatureEmbedder]'=None) ->None:
super().__init__()
self.T = T
self.embeddings = nn.ModuleDict({'embed_static': embed_static,
'embed_dynamic': embed_dynamic})
def forward(self, feat_static_cat: 'torch.Tensor', feat_static_real:
'torch.Tensor', feat_dynamic_cat: 'torch.Tensor', feat_dynamic_real:
'torch.Tensor') ->torch.Tensor:
processed_features = [self.process_static_cat(feat_static_cat),
self.process_static_real(feat_static_real), self.
process_dynamic_cat(feat_dynamic_cat), self.
process_dynamic_real(feat_dynamic_real)]
return torch.cat(processed_features, dim=-1)
def process_static_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_static'] is not None:
feature = self.embeddings['embed_static'](feature)
return feature.unsqueeze(1).expand(-1, self.T, -1).float()
def process_dynamic_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_dynamic'] is None:
return feature.float()
else:
return self.embeddings['embed_dynamic'](feature)
def process_static_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature.unsqueeze(1).expand(-1, self.T, -1)
def process_dynamic_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4]),
torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'T': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Optional
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
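# Builds the (4, 4, 16) concatenation: the two static features (in_ptr0/in_ptr1,
# indexed by batch only, hence broadcast over T) fill positions 0-7 of the
# feature axis, and the two dynamic features (in_ptr2/in_ptr3) fill positions 8-15.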
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x2 = xindex // 64
x3 = xindex // 16
x4 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x2 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (4 * x2 + (-4 + x0)), tmp9 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (4 * x3 + (-8 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (4 * x3 + (-12 + x0)), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x4, tmp22, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(256)](arg0_1, arg1_1, arg2_1, arg3_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class FeatureAssemblerNew(nn.Module):
def __init__(self, T: 'int', embed_static: 'Optional[FeatureEmbedder]'=
None, embed_dynamic: 'Optional[FeatureEmbedder]'=None) ->None:
super().__init__()
self.T = T
self.embeddings = nn.ModuleDict({'embed_static': embed_static,
'embed_dynamic': embed_dynamic})
def process_static_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_static'] is not None:
feature = self.embeddings['embed_static'](feature)
return feature.unsqueeze(1).expand(-1, self.T, -1).float()
def process_dynamic_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_dynamic'] is None:
return feature.float()
else:
return self.embeddings['embed_dynamic'](feature)
def process_static_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature.unsqueeze(1).expand(-1, self.T, -1)
def process_dynamic_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| ZhuangweiKang/pytorch-ts | FeatureAssembler | false | 2,991 | [
"Apache-2.0",
"MIT"
] | 0 | 076d456358fd1bac96becba4f1ba38ec5a5fcf4d | https://github.com/ZhuangweiKang/pytorch-ts/tree/076d456358fd1bac96becba4f1ba38ec5a5fcf4d | import torch
from typing import Optional
import torch.nn as nn
class Model(nn.Module):
def __init__(self, T: 'int', embed_static: 'Optional[FeatureEmbedder]'=
None, embed_dynamic: 'Optional[FeatureEmbedder]'=None) ->None:
super().__init__()
self.T = T
self.embeddings = nn.ModuleDict({'embed_static': embed_static,
'embed_dynamic': embed_dynamic})
def forward(self, feat_static_cat: 'torch.Tensor', feat_static_real:
'torch.Tensor', feat_dynamic_cat: 'torch.Tensor', feat_dynamic_real:
'torch.Tensor') ->torch.Tensor:
processed_features = [self.process_static_cat(feat_static_cat),
self.process_static_real(feat_static_real), self.
process_dynamic_cat(feat_dynamic_cat), self.
process_dynamic_real(feat_dynamic_real)]
return torch.cat(processed_features, dim=-1)
def process_static_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_static'] is not None:
feature = self.embeddings['embed_static'](feature)
return feature.unsqueeze(1).expand(-1, self.T, -1).float()
def process_dynamic_cat(self, feature: 'torch.Tensor') ->torch.Tensor:
if self.embeddings['embed_dynamic'] is None:
return feature.float()
else:
return self.embeddings['embed_dynamic'](feature)
def process_static_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature.unsqueeze(1).expand(-1, self.T, -1)
def process_dynamic_real(self, feature: 'torch.Tensor') ->torch.Tensor:
return feature
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4]),
torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
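# A minimal usage sketch (not part of the original repo): with no embedders, the
# two static features are expanded over T steps and all four inputs are
# concatenated along the feature axis (4 + 4 + 4 + 4 = 16).
if __name__ == "__main__":
    assembler = Model(4)
    out = assembler(torch.rand(4, 4), torch.rand(4, 4),
        torch.rand(4, 4, 4), torch.rand(4, 4, 4))
    assert out.shape == (4, 4, 16)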
|
TimeIntervalMultiHeadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# scores => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rh/crhucv7amb3b5kcd7hcpoyooazu5gjrqlakjwmi2siwommghfiiw.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# scores => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (y0 + (4*x2) + (16*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pv/cpvxifzinnau3xu6vyqheclzr6mcqs5g6lyequacfqmznapjidec.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_14, 0), kwargs = {})
triton_poi_fused_eq_2 = async_compile.triton('triton_poi_fused_eq_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cf/ccfonxhcfdgsllfvzckk57iewncu6ns6udtyllr7p2kpri4jpwoo.py
# Topologically Sorted Source Nodes: [scores_2, masked_fill_], Original ATen: [aten.div, aten.masked_fill]
# Source node to ATen node mapping:
# masked_fill_ => full_default, where
# scores_2 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_15, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
triton_poi_fused_div_masked_fill_3 = async_compile.triton('triton_poi_fused_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y5 = yindex
x3 = (xindex // 4)
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*y0)), xmask & ymask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x4 + (16*y5)), xmask & ymask)
tmp2 = tl.load(in_ptr1 + (y0 + (4*x3) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + (4*x4) + (64*y1)), xmask & ymask)
tmp4 = tmp2 + tmp3
tmp6 = tmp4 * tmp5
tmp7 = tmp1 + tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp10 = float("-inf")
tmp11 = tl.where(tmp0, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4 + (16*y5)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/br/cbrl3wqdc7dvt5uwwxwjmmvx37shkc24fo3mz6cijpjqzba5yj6b.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# Graph fragment:
# %max_1 : [num_users=3] = call_function[target=torch.ops.aten.max.default](args = (%where,), kwargs = {})
triton_per_fused_max_4 = async_compile.triton('triton_per_fused_max_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_4', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_4(in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6z/c6zgwze5mtlchc4eipqevlwskedu5dkqbvepsqiqryygvanfe23f.py
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax]
# Source node to ATen node mapping:
# scores_3 => amax, exp, sub_1, sum_2
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_sub_5 = async_compile.triton('triton_poi_fused__softmax_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp4 - tmp2
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7 - tmp2
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 - tmp2
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + (x0), tmp12, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/74/c74tdl7a2edlsizs6anoi6wrjsxhfv6dhojc7qmrbiwht76fqine.py
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
# Source node to ATen node mapping:
# scores_3 => amax, div_1, exp, sub_1, sum_2
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%where,), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%max_1,), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%isnan, %isnan_1), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%where, %max_1), kwargs = {})
# %logical_or : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%eq_1, %logical_and), kwargs = {})
triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6 = async_compile.triton('triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 == tmp2
tmp10 = libdevice.isnan(tmp0).to(tl.int1)
tmp11 = libdevice.isnan(tmp2).to(tl.int1)
tmp12 = tmp10 & tmp11
tmp13 = tmp9 | tmp12
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ln/cln5qk6apvk67nzbwpzdbitg5xss2reftt3lutxlyqjjbd6wpt2c.py
# Topologically Sorted Source Nodes: [mul_1, sum_2, output_1], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_1
# output_1 => add_3
# sum_2 => sum_3
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %permute_9), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-2]), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %sum_3), kwargs = {})
triton_poi_fused_add_mul_sum_7 = async_compile.triton('triton_poi_fused_add_mul_sum_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + ((4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (8 + y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (12 + y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + (4*y3)), tmp16, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_12, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_13, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, primals_6, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf0, primals_3, primals_4, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
del primals_4
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_2.run(primals_14, buf6, 64, grid=grid(64), stream=stream0)
del primals_14
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [scores_2, masked_fill_], Original ATen: [aten.div, aten.masked_fill]
triton_poi_fused_div_masked_fill_3.run(buf7, buf6, buf1, primals_6, primals_12, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_6
buf8 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
triton_per_fused_max_4.run(buf7, buf8, 1, 256, grid=grid(1), stream=stream0)
buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf10 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax]
triton_poi_fused__softmax_sub_5.run(buf7, buf8, buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6.run(buf7, buf8, buf9, buf10, buf11, buf15, 256, grid=grid(256), stream=stream0)
del buf10
del buf7
del buf8
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_9, primals_11, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_11
del primals_9
buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [mul_1, sum_2, output_1], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_7.run(buf14, buf11, primals_13, 16, 4, grid=grid(16, 4), stream=stream0)
return (reinterpret_tensor(buf14, (4, 4, 4), (16, 1, 4), 0), primals_12, primals_13, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (16, 4), (4, 1), 0), reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf6, buf11, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), buf15, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class TimeIntervalMultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
        It also needs position and interaction (time-interval) key/value inputs.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
bs, seq_len = k.size(0), k.size(1)
k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
if not self.kq_same:
q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
else:
q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_k = inter_k.transpose(2, 3).transpose(1, 2)
inter_v = inter_v.transpose(2, 3).transpose(1, 2)
output = self.scaled_dot_product_attention(q, k, v, inter_k,
inter_v, self.d_k, mask)
output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
        Involves pair-interaction embeddings when calculating the attention scores and the output.
"""
scores = torch.matmul(q, k.transpose(-2, -1))
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v)
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
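# Editorial note (not part of the original module): written out, the attention
# above computes, per head,
#   score_ij = (q_i . k_j + q_i . inter_k_ij) / sqrt(d_k)
# masked positions are filled with -inf, the global max is subtracted for
# numerical stability before the softmax, and the output is
#   out_i = sum_j a_ij * (v_j + inter_v_ij)
# where a_ij are the softmax-normalised scores.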
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4, 4, 1]), torch.rand([4, 4, 4, 4, 1]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'n_heads': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, ynumel, xnumel, YBLOCK: tl.constexpr,
    XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y5 = yindex
x3 = xindex // 4
y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x4 + 16 * y0), xmask & ymask,
        eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x4 + 16 * y5), xmask & ymask)
tmp2 = tl.load(in_ptr1 + (y0 + 4 * x3 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + 4 * x4 + 64 * y1), xmask & ymask)
tmp4 = tmp2 + tmp3
tmp6 = tmp4 * tmp5
tmp7 = tmp1 + tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp10 = float('-inf')
tmp11 = tl.where(tmp0, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4 + 16 * y5), tmp11, xmask & ymask)
@triton.jit
def triton_per_fused_max_4(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused__softmax_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp4 - tmp2
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7 - tmp2
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 - tmp2
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 == tmp2
tmp10 = libdevice.isnan(tmp0).to(tl.int1)
tmp11 = libdevice.isnan(tmp2).to(tl.int1)
tmp12 = tmp10 & tmp11
tmp13 = tmp9 | tmp12
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
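# Editorial note: the kernel above finishes the row-wise softmax,
# out = exp(x - global_max - row_max) / row_sum, and additionally records a
# boolean map of positions where a score equals the global max (treating
# NaN == NaN as equal); that map is saved for the backward pass of the
# `scores.max()` reduction in the eager reference.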
@triton.jit
def triton_poi_fused_add_mul_sum_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (8 + y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (12 + y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp16, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_11, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_12, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_13, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_14, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_7, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_10, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf2)
del primals_8
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_6, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, primals_4,
buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
del primals_4
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_14, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_14
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_div_masked_fill_3[grid(16, 16)](buf7, buf6, buf1,
primals_6, primals_12, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=
4, num_stages=1)
del primals_6
buf8 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_max_4[grid(1)](buf7, buf8, 1, 256, num_warps=2,
num_stages=1)
buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf10 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf0
triton_poi_fused__softmax_sub_5[grid(64)](buf7, buf8, buf9, buf10,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6[grid(256)](
            buf7, buf8, buf9, buf10, buf11, buf15, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
del buf10
del buf7
del buf8
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_9, primals_11,
buf12, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_11
del primals_9
buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf13
triton_poi_fused_add_mul_sum_7[grid(16, 4)](buf14, buf11,
primals_13, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
    return (reinterpret_tensor(buf14, (4, 4, 4), (16, 1, 4), 0), primals_12,
        primals_13, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_7, (16, 4), (4, 1), 0),
        reinterpret_tensor(primals_10, (16, 4), (4, 1), 0), buf6, buf11,
        reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), buf15,
        reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
        reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
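# Editorial summary of call() above (not generated output): the three linear
# projections run as plain mm, positional key/value embeddings and biases are
# added by the clone kernels, scores are formed as q @ k^T plus the
# q . inter_k interaction term, masked with -inf, passed through a globally
# max-shifted softmax, multiplied with v via bmm, and the scores . inter_v
# term is finally accumulated into the output.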
class TimeIntervalMultiHeadAttentionNew(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
        It also needs position and interaction (time-interval) key/value inputs.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
        Involves pair-interaction embeddings when calculating the attention scores and the output.
"""
scores = torch.matmul(q, k.transpose(-2, -1))
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v)
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6, input_7):
primals_2 = self.v_linear.weight
primals_3 = self.v_linear.bias
primals_5 = self.k_linear.weight
primals_6 = self.k_linear.bias
primals_8 = self.q_linear.weight
primals_9 = self.q_linear.bias
primals_1 = input_0
primals_4 = input_1
primals_7 = input_2
primals_10 = input_3
primals_11 = input_4
primals_12 = input_5
primals_13 = input_6
primals_14 = input_7
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14])
return output[0]
| Yingting-dev/ReChorus | TimeIntervalMultiHeadAttention | false | 2,992 | [
"MIT"
] | 0 | a16bc1e42f3e90e889133d7476c52ada44db573b | https://github.com/Yingting-dev/ReChorus/tree/a16bc1e42f3e90e889133d7476c52ada44db573b | import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class Model(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
        It also needs position and interaction (time-interval) key/value inputs.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
bs, seq_len = k.size(0), k.size(1)
k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
if not self.kq_same:
q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
else:
q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_k = inter_k.transpose(2, 3).transpose(1, 2)
inter_v = inter_v.transpose(2, 3).transpose(1, 2)
output = self.scaled_dot_product_attention(q, k, v, inter_k,
inter_v, self.d_k, mask)
output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
        Involves pair-interaction embeddings when calculating the attention scores and the output.
"""
scores = torch.matmul(q, k.transpose(-2, -1))
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v)
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4, 4, 1]), torch.rand([4, 4, 4, 4, 1]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
CondUpsampler | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/od/codshuh7xiczt6wn6tkscsdoekar2wyxdmcrcatech4332f4adgv.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.4), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.4
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
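# Editorial note (not generated by Inductor): besides the activated value, the
# kernel stores a boolean `x > 0` mask; call() returns that mask so the
# backward pass can reconstruct the leaky_relu gradient without keeping the
# pre-activation tensor.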
# kernel path: runs/run_shard_7/inductor_cache/vb/cvbbnf5kvbp7qrwe63wo6rreqxl6chogtxz44wlirthtsbxxxjtr.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_3, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 0.4), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %view_3, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.4
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 128, grid=grid(128), stream=stream0)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (64, 2), (2, 1), 0), reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, primals_5, buf4, buf5, 256, grid=grid(256), stream=stream0)
del buf3
del primals_5
return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, reinterpret_tensor(buf2, (64, 2), (2, 1), 0), buf4, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 2), (2, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
class CondUpsampler(nn.Module):
def __init__(self, cond_length, target_dim):
super().__init__()
self.linear1 = nn.Linear(cond_length, target_dim // 2)
self.linear2 = nn.Linear(target_dim // 2, target_dim)
def forward(self, x):
x = self.linear1(x)
x = F.leaky_relu(x, 0.4)
x = self.linear2(x)
x = F.leaky_relu(x, 0.4)
return x
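# Editorial shape sketch (not part of the original code): with cond_length=C
# and target_dim=D, linear1 maps (..., C) -> (..., D // 2) and linear2 maps
# (..., D // 2) -> (..., D), each followed by leaky_relu with slope 0.4, e.g.
#   ups = CondUpsampler(cond_length=4, target_dim=8)
#   y = ups(torch.rand(2, 10, 4))  # y.shape == (2, 10, 8)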
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'cond_length': 4, 'target_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.4
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.4
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (2, 4), (4, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 2), (2, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 2), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(128)](buf0, primals_2, buf1,
buf2, 128, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (64, 2), (2, 1), 0),
reinterpret_tensor(primals_4, (2, 4), (1, 2), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(256)](buf3, primals_5, buf4,
buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf3
del primals_5
    return (buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
        reinterpret_tensor(buf2, (64, 2), (2, 1), 0), buf4, primals_4)
class CondUpsamplerNew(nn.Module):
def __init__(self, cond_length, target_dim):
super().__init__()
self.linear1 = nn.Linear(cond_length, target_dim // 2)
self.linear2 = nn.Linear(target_dim // 2, target_dim)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ZhuangweiKang/pytorch-ts | CondUpsampler | false | 2,993 | [
"Apache-2.0",
"MIT"
] | 0 | 076d456358fd1bac96becba4f1ba38ec5a5fcf4d | https://github.com/ZhuangweiKang/pytorch-ts/tree/076d456358fd1bac96becba4f1ba38ec5a5fcf4d | import torch
import torch.nn.functional as F
import torch.nn as nn
class Model(nn.Module):
def __init__(self, cond_length, target_dim):
super().__init__()
self.linear1 = nn.Linear(cond_length, target_dim // 2)
self.linear2 = nn.Linear(target_dim // 2, target_dim)
def forward(self, x):
x = self.linear1(x)
x = F.leaky_relu(x, 0.4)
x = self.linear2(x)
x = F.leaky_relu(x, 0.4)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
GatedLinearUnit | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lj/cljivz6kd6focnzfg5627dddqf3ibp7tilm4thbme5qqscfovxkf.py
# Topologically Sorted Source Nodes: [sigmoid, val_1, mul], Original ATen: [aten.sigmoid, aten.tanh, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# val_1 => tanh
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%getitem_1,), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%getitem,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %tanh), kwargs = {})
triton_poi_fused_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_mul_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
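# Editorial note (not generated by Inductor): the fused kernel above treats the
# second half of the last dimension (offsets 2..3) as the gate and the first
# half (offsets 0..1) as the value, writing sigmoid(gate) * tanh(value) into
# the 4x4x4x2 output in a single pass.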
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, val_1, mul], Original ATen: [aten.sigmoid, aten.tanh, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class GatedLinearUnit(nn.Module):
def __init__(self, dim: 'int'=-1, nonlinear: 'bool'=True):
super().__init__()
self.dim = dim
self.nonlinear = nonlinear
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
val, gate = torch.chunk(x, 2, dim=self.dim)
if self.nonlinear:
val = torch.tanh(val)
return torch.sigmoid(gate) * val
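# Editorial usage sketch (not part of the original module): for an input of
# shape (..., 2 * d) along `dim`, the first chunk is tanh-squashed (when
# nonlinear=True) and gated by the sigmoid of the second chunk, e.g.
#   glu = GatedLinearUnit(dim=-1)
#   y = glu(torch.randn(8, 6))  # y.shape == (8, 3)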
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sigmoid_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = libdevice.tanh(tmp2)
tmp4 = tmp1 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sigmoid_tanh_0[grid(128)](arg0_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GatedLinearUnitNew(nn.Module):
def __init__(self, dim: 'int'=-1, nonlinear: 'bool'=True):
super().__init__()
self.dim = dim
self.nonlinear = nonlinear
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ZhuangweiKang/pytorch-ts | GatedLinearUnit | false | 2,994 | [
"Apache-2.0",
"MIT"
] | 0 | 076d456358fd1bac96becba4f1ba38ec5a5fcf4d | https://github.com/ZhuangweiKang/pytorch-ts/tree/076d456358fd1bac96becba4f1ba38ec5a5fcf4d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, dim: 'int'=-1, nonlinear: 'bool'=True):
super().__init__()
self.dim = dim
self.nonlinear = nonlinear
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
val, gate = torch.chunk(x, 2, dim=self.dim)
if self.nonlinear:
val = torch.tanh(val)
return torch.sigmoid(gate) * val
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
StyledConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wi/cwiyl3lwwtancorrifw77xt3aqb4lermdintht45zvkj3bg54nbl.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ri/criuvsdl3sferb4bb6ci5zaps3wys7xxcpybz7vfo2ba4q7cuq6c.py
# Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt]
# Source node to ATen node mapping:
# add => add
# demod => rsqrt
# mul_2 => mul_2
# pow_1 => pow_1
# sum_1 => sum_1
# weight => mul_3
# weight_1 => mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, 0.125), kwargs = {})
# %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %view), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mul_3, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2, 3, 4]), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %view_1), kwargs = {})
triton_per_fused_add_mul_pow_rsqrt_sum_2 = async_compile.triton('triton_per_fused_add_mul_pow_rsqrt_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_pow_rsqrt_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = (rindex // 16)
x1 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + (64*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + (4*x1)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + (x4), tmp12, xmask)
tl.store(out_ptr0 + (r5 + (64*x4)), tmp13, xmask)
''', device_str='cuda')
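# Editorial note (not generated by Inductor): the reduction above is
# StyleGAN2-style weight demodulation applied after the equalised-lr scale,
#   w'_{o,i,k} = s_i * w_{o,i,k} * rsqrt(sum_{i,k} (s_i * w_{o,i,k})^2 + 1e-8),
# fused with the per-sample modulation into one pass per (batch, out_channel).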
# kernel path: runs/run_shard_7/inductor_cache/m5/cm56eqdvtyi73jnsik4k6g5v62qofgoqvpcasegx6ljukv6gdjyd.py
# Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# add_2 => add_2
# leaky_relu => gt, mul_6, where
# mul_5 => mul_5
# out_3 => add_1
# out_4 => mul_7
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %normal_functional), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_4, %mul_5), kwargs = {})
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %view_5), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_2, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_2, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, 1.4142135623730951), kwargs = {})
triton_poi_fused_add_leaky_relu_mul_3 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 25
x2 = (xindex // 100)
x1 = (xindex // 25) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + (25*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = 1.4142135623730951
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + (x3), tmp9, xmask)
tl.store(out_ptr1 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, out], Original ATen: [aten.mul, aten.addmm]
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0; del buf0 # reuse
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, weight, pow_1, sum_1, add, demod, weight_1], Original ATen: [aten.mul, aten.pow, aten.sum, aten.add, aten.rsqrt]
triton_per_fused_add_mul_pow_rsqrt_sum_2.run(buf4, primals_5, buf2, buf5, 16, 64, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [noise], Original ATen: [aten.normal_functional]
buf8 = torch.ops.aten.normal_functional.default(buf7)
del buf7
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_5, out_3, add_2, leaky_relu, out_4], Original ATen: [aten.mul, aten.add, aten.leaky_relu]
triton_poi_fused_add_leaky_relu_mul_3.run(buf6, primals_6, buf9, primals_7, buf10, buf11, 400, grid=grid(400), stream=stream0)
del buf6
del primals_6
del primals_7
return (buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5, (16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1, 16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10, )
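# Editorial summary (not generated output): call() modulates and demodulates
# the conv weight per sample, runs a grouped conv2d (groups = batch size),
# adds noise scaled by the learned strength plus the bias, and applies
# leaky_relu(0.2) scaled by sqrt(2) -- the fused_leaky_relu activation used by
# StyledConv.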
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
from torch import nn
from torch.nn import functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
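# Illustrative note (added commentary, not part of the original repository):
# upfirdn2d_native zero-upsamples by (up_y, up_x), pads, applies the flipped FIR
# kernel via F.conv2d, then subsamples by (down_y, down_x) with strided slicing.
# As a worked example of the size formula above: in_h = 4, up = down = 1,
# pad = (2, 2) and a 4x4 blur kernel give (4 + 2 + 2 - 4) // 1 + 1 = 5.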
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
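# Illustrative note (added commentary, not part of the original repository):
# with demodulate=True the per-sample weight is rescaled by
# demod = 1 / sqrt(sum_{in, kh, kw} (scale * w * style)^2 + 1e-08),
# so each output channel of each sample has an (approximately) unit-norm filter
# before the grouped convolution (groups=batch) in forward() is applied.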
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
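# Illustrative note (added commentary, not part of the original repository):
# NoiseInjection draws one single-channel noise map per sample, which broadcasts
# across all feature channels; since self.weight starts at zero, the layer acts
# as an identity until training moves the weight away from 0.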
class StyledConv(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input, style, noise=None):
out = self.conv(input, style)
out = self.noise(out, noise=noise)
out = self.activate(out)
return out
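def _example_styled_conv_shapes():
    # Hypothetical usage sketch (added for illustration, not from the original
    # repository); it mirrors get_inputs()/get_init_inputs() below. With
    # kernel_size=4 the ModulatedConv2d padding is kernel_size // 2 = 2, so a
    # 4x4 input grows to 4 + 2 * 2 - 4 + 1 = 5 per spatial dim.
    conv = StyledConv(in_channel=4, out_channel=4, kernel_size=4, style_dim=4)
    out = conv(torch.rand(4, 4, 4, 4), torch.rand(4, 4))
    assert out.shape == (4, 4, 5, 5)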
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'out_channel': 4, 'kernel_size': 4,
'style_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import math
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_per_fused_add_mul_pow_rsqrt_sum_2(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r5 = rindex
x0 = xindex % 4
r3 = rindex // 16
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r5 + 64 * x0), xmask, eviction_policy=
'evict_last', other=0.0)
tmp3 = tl.load(in_ptr1 + (r3 + 4 * x1), xmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = 0.125
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp5 = tmp4 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = 1e-08
tmp11 = tmp9 + tmp10
tmp12 = libdevice.rsqrt(tmp11)
tmp13 = tmp4 * tmp12
tl.debug_barrier()
tl.store(in_out_ptr0 + x4, tmp12, xmask)
tl.store(out_ptr0 + (r5 + 64 * x4), tmp13, xmask)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 25
x2 = xindex // 100
x1 = xindex // 25 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tl.load(in_ptr2 + (x0 + 25 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp4 = tmp2 * tmp3
tmp5 = tmp0 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 0.0
tmp9 = tmp7 > tmp8
tmp10 = 0.2
tmp11 = tmp7 * tmp10
tmp12 = tl.where(tmp9, tmp7, tmp11)
tmp13 = 1.4142135623730951
tmp14 = tmp12 * tmp13
tl.store(out_ptr0 + x3, tmp9, xmask)
tl.store(out_ptr1 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (1, 4, 4, 4, 4), (256, 64, 16, 4, 1))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](primals_2, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_4, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
buf3 = buf0
del buf0
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_per_fused_add_mul_pow_rsqrt_sum_2[grid(16)](buf4, primals_5,
buf2, buf5, 16, 64, XBLOCK=8, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4,
4, 4), (64, 16, 4, 1), 0), stride=(1, 1), padding=(2, 2),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=4, bias=None)
assert_size_stride(buf6, (1, 16, 5, 5), (400, 25, 5, 1))
buf7 = empty_strided_cuda((4, 1, 5, 5), (25, 25, 5, 1), torch.float32)
buf8 = torch.ops.aten.normal_functional.default(buf7)
del buf7
buf9 = buf8
del buf8
buf10 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32
)
triton_poi_fused_add_leaky_relu_mul_3[grid(400)](buf6, primals_6,
buf9, primals_7, buf10, buf11, 400, XBLOCK=256, num_warps=4,
num_stages=1)
del buf6
del primals_6
del primals_7
return buf11, primals_4, primals_5, buf2, buf4, reinterpret_tensor(buf5,
(16, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (1,
16, 4, 4), (256, 16, 4, 1), 0), buf9, buf10
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
self.in_channel = in_channel
self.out_channel = out_channel
self.upsample = upsample
self.downsample = downsample
if upsample:
factor = 2
p = len(blur_kernel) - factor - (kernel_size - 1)
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2 + 1
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor
=factor)
if downsample:
factor = 2
p = len(blur_kernel) - factor + (kernel_size - 1)
pad0 = (p + 1) // 2
pad1 = p // 2
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
fan_in = in_channel * kernel_size ** 2
self.scale = 1 / math.sqrt(fan_in)
self.padding = kernel_size // 2
self.weight = nn.Parameter(torch.randn(1, out_channel, in_channel,
kernel_size, kernel_size))
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
self.demodulate = demodulate
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, upsample={self.upsample}, downsample={self.downsample})'
)
def forward(self, input, style):
batch, in_channel, height, width = input.shape
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
weight = self.scale * self.weight * style
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-08)
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
weight = weight.view(batch * self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
if self.upsample:
input = input.view(1, batch * in_channel, height, width)
weight = weight.view(batch, self.out_channel, in_channel, self.
kernel_size, self.kernel_size)
weight = weight.transpose(1, 2).reshape(batch * in_channel,
self.out_channel, self.kernel_size, self.kernel_size)
out = F.conv_transpose2d(input, weight, padding=0, stride=2,
groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
out = self.blur(out)
elif self.downsample:
input = self.blur(input)
_, _, height, width = input.shape
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
else:
input = input.view(1, batch * in_channel, height, width)
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
_, _, height, width = out.shape
out = out.view(batch, self.out_channel, height, width)
return out
class NoiseInjection(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1))
def forward(self, image, noise=None):
if noise is None:
batch, _, height, width = image.shape
noise = image.new_empty(batch, 1, height, width).normal_()
return image + self.weight * noise
class StyledConvNew(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
upsample=False, blur_kernel=[1, 3, 3, 1], demodulate=True):
super().__init__()
self.conv = ModulatedConv2d(in_channel, out_channel, kernel_size,
style_dim, upsample=upsample, blur_kernel=blur_kernel,
demodulate=demodulate)
self.noise = NoiseInjection()
self.activate = FusedLeakyReLU(out_channel)
def forward(self, input_0, input_1):
primals_5 = self.conv.weight
primals_2 = self.conv.modulation.weight
primals_3 = self.conv.modulation.bias
primals_6 = self.noise.weight
primals_7 = self.activate.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| YotamNitzan/pixel2style2pixel | StyledConv | false | 2,995 | [
"MIT"
] | 0 | b943f9e6de046a54b901eea1d8714cb02a71605f | https://github.com/YotamNitzan/pixel2style2pixel/tree/b943f9e6de046a54b901eea1d8714cb02a71605f | import math
import torch
from torch import nn
from torch.nn import functional as F
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=negative_slope) * scale
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-
pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1
], pad[0], pad[1])
return out
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class FusedLeakyReLU(nn.Module):
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
super().__init__()
self.bias = nn.Parameter(torch.zeros(channel))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_leaky_relu(input, self.bias, self.negative_slope, self
.scale)
class Blur(nn.Module):
def __init__(self, kernel, pad, upsample_factor=1):
super().__init__()
kernel = make_kernel(kernel)
if upsample_factor > 1:
kernel = kernel * upsample_factor ** 2
self.register_buffer('kernel', kernel)
self.pad = pad
def forward(self, input):
out = upfirdn2d(input, self.kernel, pad=self.pad)
return out
class ModulatedConv2d(nn.Module):
def __init__(self, in_channel, out_channel, kernel_size, style_dim,
demodulate=True, upsample=False, downsample=False, blur_kernel=[1,
3, 3, 1]):
super().__init__()
self.eps = 1e-08
self.kernel_size = kernel_size
# ... truncated (>4000 chars) for memory efficiency |
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ul/culsagfqamnnmddjotxaqqrzmmkltfbyazkye544lnbmocdfeo7k.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 28
x1 = (xindex // 28)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 24, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((24*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 28, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + ((4*x1) + ((-24) + x0)), tmp12 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lc/clcsnjcqfyfpolgflybhra37v7tkcuzg4jvogembn6crpgpcvdhc.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/h5/ch52uc2erke4n6rzuq4544hj2etkkufywfyw77ur444hhbzmpili.py
# Topologically Sorted Source Nodes: [xs], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# xs => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (48, 28), (28, 1))
assert_size_stride(primals_6, (48, ), (1, ))
assert_size_stride(primals_7, (1, 48), (48, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24), (24, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 24), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 28), (28, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 112, grid=grid(112), stream=stream0)
del primals_4
buf2 = empty_strided_cuda((4, 48), (48, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (28, 48), (1, 28), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_6, 192, grid=grid(192), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7, (48, 1), (1, 48), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 24), (24, 1), torch.bool)
# Topologically Sorted Source Nodes: [xs], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf0, primals_2, buf6, 96, grid=grid(96), stream=stream0)
del buf0
del primals_2
return (buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((24, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((24, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((48, 28), (28, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((48, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 48), (48, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
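# Illustrative note (added commentary, not part of the original repository):
# hidden_init takes fan_in from layer.weight.data.size()[0], the first weight
# dimension. For fcs1 = nn.Linear(4, 24) the weight is (24, 4), so fan_in = 24
# and lim = 1 / sqrt(24) ~= 0.204, i.e. a uniform init in (-0.204, 0.204).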
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=24,
fc2_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = F.relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.relu(self.fc2(x))
return self.fc3(x)
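def _example_critic_forward():
    # Hypothetical usage sketch (added for illustration, not from the original
    # repository); it mirrors get_inputs()/get_init_inputs() below. The state
    # passes through fcs1 (4 -> 24), is concatenated with the 4-dim action
    # (-> 28), then fc2 (28 -> 48) and fc3 (48 -> 1) give one Q-value per sample.
    critic = Critic(state_size=4, action_size=4, seed=4)
    q = critic(torch.rand(4, 4), torch.rand(4, 4))
    assert q.shape == (4, 1)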
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 28
x1 = xindex // 28
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 24, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (24 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 28, tl.int64)
tmp15 = tl.load(in_ptr2 + (4 * x1 + (-24 + x0)), tmp12 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tl.where(tmp4, tmp11, tmp15)
tl.store(out_ptr0 + x2, tmp16, xmask)
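# Added commentary (an interpretation of the fused graph, not original source):
# the kernel above reproduces x = torch.cat((relu(fcs1(state)), action), dim=1)
# in one pass. Columns 0-23 hold the bias-added, ReLU-ed fcs1 output (the matmul
# itself is done beforehand with extern_kernels.mm); columns 24-27 copy the
# action tensor.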
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 48
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 96
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 24
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (24, 4), (4, 1))
assert_size_stride(primals_2, (24,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (48, 28), (28, 1))
assert_size_stride(primals_6, (48,), (1,))
assert_size_stride(primals_7, (1, 48), (48, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 24), (24, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 24),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 28), (28, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(112)](buf0, primals_2, primals_4, buf1,
112, XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
buf2 = empty_strided_cuda((4, 48), (48, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_5, (28, 48), (1,
28), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(192)](buf3, primals_6, 192, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf3, reinterpret_tensor(primals_7,
(48, 1), (1, 48), 0), alpha=1, beta=1, out=buf5)
del primals_8
buf6 = empty_strided_cuda((4, 24), (24, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(96)](buf0,
primals_2, buf6, 96, XBLOCK=128, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf5, primals_3, buf1, buf3, primals_7, primals_5, buf6
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class CriticNew(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=24,
fc2_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super(CriticNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0, input_1):
primals_1 = self.fcs1.weight
primals_2 = self.fcs1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| YufengJin/deep-reinforcement-learning | Critic | false | 2,996 | [
"MIT"
] | 0 | 141cf00f169b46aa492c9e7520429bfdaab0117d | https://github.com/YufengJin/deep-reinforcement-learning/tree/141cf00f169b46aa492c9e7520429bfdaab0117d | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Model(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=24,
fc2_units=48):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, 1)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(-0.003, 0.003)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = F.relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.relu(self.fc2(x))
return self.fc3(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
TimeIntervalTransformerLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dk/cdk4odz276xorciau5ehgl7f3s2mgkf3hrye6xep6kzubczdeqqy.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# scores => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rh/crhucv7amb3b5kcd7hcpoyooazu5gjrqlakjwmi2siwommghfiiw.py
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# scores => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (y0 + (4*x2) + (16*y1)), tmp4, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pv/cpvxifzinnau3xu6vyqheclzr6mcqs5g6lyequacfqmznapjidec.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=2] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_12, 0), kwargs = {})
triton_poi_fused_eq_2 = async_compile.triton('triton_poi_fused_eq_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/cf/ccfonxhcfdgsllfvzckk57iewncu6ns6udtyllr7p2kpri4jpwoo.py
# Topologically Sorted Source Nodes: [scores_2, masked_fill_], Original ATen: [aten.div, aten.masked_fill]
# Source node to ATen node mapping:
# masked_fill_ => full_default, where
# scores_2 => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_15, 1.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -inf), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
triton_poi_fused_div_masked_fill_3 = async_compile.triton('triton_poi_fused_div_masked_fill_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_masked_fill_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y5 = yindex
x3 = (xindex // 4)
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x4 + (16*y0)), xmask & ymask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x4 + (16*y5)), xmask & ymask)
tmp2 = tl.load(in_ptr1 + (y0 + (4*x3) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + (4*x4) + (64*y1)), xmask & ymask)
tmp4 = tmp2 + tmp3
tmp6 = tmp4 * tmp5
tmp7 = tmp1 + tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp10 = float("-inf")
tmp11 = tl.where(tmp0, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4 + (16*y5)), tmp11, xmask & ymask)
''', device_str='cuda')
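# Added commentary (an interpretation of the fused graph above, not original
# source): this kernel appears to add the (projected key + bias) * time-interval
# term to the raw attention scores, divide by the scale (1.0 in this
# configuration), and apply the padding mask, roughly
# scores = scores.masked_fill(mask == 0, float('-inf')).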
# kernel path: runs/run_shard_7/inductor_cache/br/cbrl3wqdc7dvt5uwwxwjmmvx37shkc24fo3mz6cijpjqzba5yj6b.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# Graph fragment:
# %max_1 : [num_users=3] = call_function[target=torch.ops.aten.max.default](args = (%where,), kwargs = {})
triton_per_fused_max_4 = async_compile.triton('triton_per_fused_max_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_max_4', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_max_4(in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp3, None)
''', device_str='cuda')
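# Hedged note (editorial): this persistent reduction collapses all 256 masked score
# entries into a single scalar, matching the global `scores.max()` the source model
# subtracts before its softmax (a whole-tensor maximum, not a per-row one).
def _eager_global_max_reference(scores):
    # torch.Tensor.max() with no dim argument reduces over every element
    return scores.max()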
# kernel path: runs/run_shard_7/inductor_cache/6z/c6zgwze5mtlchc4eipqevlwskedu5dkqbvepsqiqryygvanfe23f.py
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax]
# Source node to ATen node mapping:
# scores_3 => amax, exp, sub_1, sum_2
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_sub_5 = async_compile.triton('triton_poi_fused__softmax_sub_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_sub_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp4 - tmp2
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7 - tmp2
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 - tmp2
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + (x0), tmp12, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
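# Hedged reference sketch (editorial): the kernel above emits the two per-row softmax
# statistics for (scores - global_max), namely the row maximum and the row sum of
# exponentials; the actual division is deferred to the next kernel. Names are illustrative.
def _eager_softmax_stats_reference(scores, global_max):
    shifted = scores - global_max
    row_max = shifted.amax(dim=-1, keepdim=True)
    row_sum = (shifted - row_max).exp().sum(dim=-1, keepdim=True)
    return row_max, row_sum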
# kernel path: runs/run_shard_7/inductor_cache/74/c74tdl7a2edlsizs6anoi6wrjsxhfv6dhojc7qmrbiwht76fqine.py
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
# Source node to ATen node mapping:
# scores_3 => amax, div_1, exp, sub_1, sum_2
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_2), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%where,), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%max_1,), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%isnan, %isnan_1), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%where, %max_1), kwargs = {})
# %logical_or : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%eq_1, %logical_and), kwargs = {})
triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6 = async_compile.triton('triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 == tmp2
tmp10 = libdevice.isnan(tmp0).to(tl.int1)
tmp11 = libdevice.isnan(tmp2).to(tl.int1)
tmp12 = tmp10 & tmp11
tmp13 = tmp9 | tmp12
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp13, xmask)
''', device_str='cuda')
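# Hedged reference sketch (editorial): the kernel above finishes the numerically stable
# softmax and also records, for the backward pass of the global max reduction, which
# elements equalled that max (with NaN == NaN counted as a match). Names are illustrative.
def _eager_softmax_finish_reference(scores, global_max, row_max, row_sum):
    probs = ((scores - global_max) - row_max).exp() / row_sum
    hit_max = (scores == global_max) | (scores.isnan() & global_max.isnan())
    return probs, hit_max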
# kernel path: runs/run_shard_7/inductor_cache/ln/cln5qk6apvk67nzbwpzdbitg5xss2reftt3lutxlyqjjbd6wpt2c.py
# Topologically Sorted Source Nodes: [mul_1, sum_2, output_1], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_1
# output_1 => add_3
# sum_2 => sum_3
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %permute_9), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-2]), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %sum_3), kwargs = {})
triton_poi_fused_add_mul_sum_7 = async_compile.triton('triton_poi_fused_add_mul_sum_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_out_ptr0 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + ((4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (8 + y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + (4*x2) + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (12 + y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + (4*y3)), tmp16, xmask & ymask)
''', device_str='cuda')
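# Hedged reference sketch (editorial): the kernel above adds the interaction-value term
# to the plain attention output, mirroring the source model's
# `output += (scores[:, :, :, :, None] * inter_v).sum(-2)`. Names are illustrative.
def _eager_interaction_value_reference(output, scores, inter_v):
    return output + (scores[:, :, :, :, None] * inter_v).sum(-2)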
# kernel path: runs/run_shard_7/inductor_cache/7n/c7nskknqdigadnfriyezgeao7cxh4bt35wibpjsh7wrxyacbghj4.py
# Topologically Sorted Source Nodes: [add_2, context], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add_2 => add_4
# context => clone_4
# Graph fragment:
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_25, %primals_1), kwargs = {})
# %clone_4 : [num_users=3] = call_function[target=torch.ops.aten.clone.default](args = (%add_4,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_add_native_layer_norm_8 = async_compile.triton('triton_poi_fused_add_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
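# Hedged reference sketch (editorial): the kernel above is the residual connection around
# the attention block, i.e. `context + seq` in the source model (the surrounding Dropout
# leaves no multiply in this particular graph), materialized contiguously for the
# LayerNorm that follows. Names are illustrative.
def _eager_attention_residual_reference(context, seq):
    return context + seq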
# kernel path: runs/run_shard_7/inductor_cache/hn/chnyp4bqchi6cc3qkpikodtjzt7sfs4gz3r2kunqaesb7ahrywso.py
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# context => add_5, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_4, [2]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
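# Hedged reference sketch (editorial): the kernel above computes the per-row mean and the
# reciprocal square root of the biased variance plus eps = 1e-05, i.e. the two statistics
# LayerNorm needs over the last dimension (of size 4 here).
def _eager_layer_norm_stats_reference(x, eps=1e-05):
    mean = x.mean(dim=-1, keepdim=True)
    var = x.var(dim=-1, unbiased=False, keepdim=True)
    return mean, (var + eps).rsqrt()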
# kernel path: runs/run_shard_7/inductor_cache/al/cal3txxjlyumb2wxf6pzsp7g5yvv5ygiluv6ygjjzldvb2woph4t.py
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# context => add_5, add_6, mul_2, mul_3, rsqrt, sub_2, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_4, [2]), kwargs = {correction: 0, keepdim: True})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_5,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_4, %getitem_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_13), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_14), kwargs = {})
triton_poi_fused_native_layer_norm_10 = async_compile.triton('triton_poi_fused_native_layer_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
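# Hedged reference sketch (editorial): the kernel above applies the affine LayerNorm
# transform using the statistics from the previous kernel. Names are illustrative;
# `weight` and `bias` stand for the LayerNorm parameters (e.g. primals_13 / primals_14 in
# the first call below).
def _eager_layer_norm_apply_reference(x, mean, rstd, weight, bias):
    return (x - mean) * rstd * weight + bias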
# kernel path: runs/run_shard_7/inductor_cache/x4/cx4dy7nub5axbgswcjd7xtsxm4hlex3zmjlfcyucivazkjm63wl6.py
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_3 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_27,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_11 = async_compile.triton('triton_poi_fused_relu_threshold_backward_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
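# Hedged reference sketch (editorial): the kernel above fuses the first feed-forward
# linear layer's bias add with an in-place ReLU, and also stores the boolean `<= 0` mask
# that autograd later uses as the ReLU threshold mask in the backward pass.
def _eager_relu_with_mask_reference(x, bias):
    out = (x + bias).relu()
    return out, out <= 0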
# kernel path: runs/run_shard_7/inductor_cache/ly/clych72evfl43h75debtcdhychoab7iw7f2ch2b5ebe3jjglsndg.py
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_3 => add_7
# Graph fragment:
# %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_29, %add_6), kwargs = {})
triton_poi_fused_add_12 = async_compile.triton('triton_poi_fused_add_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_12', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
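# Hedged reference sketch (editorial): the kernel above fuses the second feed-forward
# linear layer's bias add with the residual connection back to the first LayerNorm's
# output. Names are illustrative.
def _eager_ffn_residual_reference(x, bias, residual):
    return x + bias + residual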
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_11, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4, ), (1, ))
assert_size_stride(primals_19, (4, ), (1, ))
assert_size_stride(primals_20, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, primals_6, buf3, 16, 4, grid=grid(16, 4), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf0, primals_3, primals_4, buf4, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_3
del primals_4
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
triton_poi_fused_eq_2.run(primals_12, buf6, 64, grid=grid(64), stream=stream0)
del primals_12
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [scores_2, masked_fill_], Original ATen: [aten.div, aten.masked_fill]
triton_poi_fused_div_masked_fill_3.run(buf7, buf6, buf1, primals_6, primals_10, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_6
buf8 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
triton_per_fused_max_4.run(buf7, buf8, 1, 256, grid=grid(1), stream=stream0)
buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf1 # reuse
buf10 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax]
triton_poi_fused__softmax_sub_5.run(buf7, buf8, buf9, buf10, 64, grid=grid(64), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [sub, scores_3], Original ATen: [aten.sub, aten._softmax, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6.run(buf7, buf8, buf9, buf10, buf11, buf27, 256, grid=grid(256), stream=stream0)
del buf7
del buf8
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf2, primals_8, primals_9, buf12, 16, 4, grid=grid(16, 4), stream=stream0)
del primals_8
del primals_9
buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [mul_1, sum_2, output_1], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_7.run(buf14, buf11, primals_11, 16, 4, grid=grid(16, 4), stream=stream0)
buf15 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [add_2, context], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_8.run(buf14, primals_1, buf15, 16, 4, grid=grid(16, 4), stream=stream0)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf15, buf16, buf17, 16, grid=grid(16), stream=stream0)
buf18 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [context], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf15, buf16, buf17, primals_13, primals_14, buf18, 64, grid=grid(64), stream=stream0)
del primals_14
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0); del buf19 # reuse
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_11.run(buf20, primals_16, buf26, 64, grid=grid(64), stream=stream0)
del primals_16
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0), reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
triton_poi_fused_add_12.run(buf22, primals_18, buf18, 64, grid=grid(64), stream=stream0)
del primals_18
buf23 = buf17; del buf17 # reuse
buf24 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [output_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf22, buf23, buf24, 16, grid=grid(16), stream=stream0)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_5], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_10.run(buf22, buf23, buf24, primals_19, primals_20, buf25, 64, grid=grid(64), stream=stream0)
del buf23
del buf24
del primals_20
return (buf25, primals_10, primals_11, primals_13, primals_19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf6, buf11, buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0), reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf22, primals_17, buf26, primals_15, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1), 0), buf27, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0), )
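# Hedged note (editorial): following the usual Inductor convention, the first element of
# the tuple returned by call() (buf25) is the layer's forward output; the remaining
# tensors are inputs and intermediates saved for the backward pass. A minimal sketch of
# consuming it, assuming the primals are allocated as in the benchmark below:
#
#     out = call([primals_1, primals_2, ..., primals_20])[0]  # (4, 4, 4) layer output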
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class TimeIntervalMultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It also needs position and interaction (time interval) key/value input.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
bs, seq_len = k.size(0), k.size(1)
k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
if not self.kq_same:
q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
else:
q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_k = inter_k.transpose(2, 3).transpose(1, 2)
inter_v = inter_v.transpose(2, 3).transpose(1, 2)
output = self.scaled_dot_product_attention(q, k, v, inter_k,
inter_v, self.d_k, mask)
output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
Involve pair interaction embeddings when calculating attention scores and output
"""
scores = torch.matmul(q, k.transpose(-2, -1))
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v)
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
class TimeIntervalTransformerLayer(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model,
n_heads, kq_same=kq_same)
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, seq, pos_k, pos_v, inter_k, inter_v, mask):
context = self.masked_attn_head(seq, seq, seq, pos_k, pos_v,
inter_k, inter_v, mask)
context = self.layer_norm1(self.dropout1(context) + seq)
output = self.linear1(context).relu()
output = self.linear2(output)
output = self.layer_norm2(self.dropout2(output) + context)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4, 4, 1]), torch.rand([4, 4, 4, 4, 1]),
torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'n_heads': 4, 'dropout': 0.5}]
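# Hedged usage sketch (editorial, not part of the exported module): one way to exercise
# the layer with the factories above. .eval() disables the p=0.5 dropout so the run is
# deterministic, and with the continuous random mask from get_inputs no position is
# actually masked out.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    layer = TimeIntervalTransformerLayer(*init_args, **init_kwargs).eval()
    with torch.no_grad():
        out = layer(*get_inputs())
    print(out.shape)  # expected: torch.Size([4, 4, 4])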
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (y0 + 4 * x2 + 16 * y1), tmp4, xmask & ymask)
@triton.jit
def triton_poi_fused_eq_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_div_masked_fill_3(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3,
    ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x4 = xindex
y0 = yindex % 4
y5 = yindex
x3 = xindex // 4
y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x4 + 16 * y0), xmask & ymask,
        eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x4 + 16 * y5), xmask & ymask)
tmp2 = tl.load(in_ptr1 + (y0 + 4 * x3 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y0, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y0 + 4 * x4 + 64 * y1), xmask & ymask)
tmp4 = tmp2 + tmp3
tmp6 = tmp4 * tmp5
tmp7 = tmp1 + tmp6
tmp8 = 1.0
tmp9 = tmp7 * tmp8
tmp10 = float('-inf')
tmp11 = tl.where(tmp0, tmp10, tmp9)
tl.store(in_out_ptr0 + (x4 + 16 * y5), tmp11, xmask & ymask)
@triton.jit
def triton_per_fused_max_4(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp3, None)
@triton.jit
def triton_poi_fused__softmax_sub_5(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp4 - tmp2
tmp6 = triton_helpers.maximum(tmp3, tmp5)
tmp8 = tmp7 - tmp2
tmp9 = triton_helpers.maximum(tmp6, tmp8)
tmp11 = tmp10 - tmp2
tmp12 = triton_helpers.maximum(tmp9, tmp11)
tmp13 = tmp3 - tmp12
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp5 - tmp12
tmp16 = tl_math.exp(tmp15)
tmp17 = tmp14 + tmp16
tmp18 = tmp8 - tmp12
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp17 + tmp19
tmp21 = tmp11 - tmp12
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp20 + tmp22
tl.store(out_ptr0 + x0, tmp12, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6(in_ptr0, in_ptr1,
    in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp0 - tmp2
tmp5 = tmp3 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp8 = tmp6 / tmp7
tmp9 = tmp0 == tmp2
tmp10 = libdevice.isnan(tmp0).to(tl.int1)
tmp11 = libdevice.isnan(tmp2).to(tl.int1)
tmp12 = tmp10 & tmp11
tmp13 = tmp9 | tmp12
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_mul_sum_7(in_out_ptr0, in_ptr0, in_ptr1, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_out_ptr0 + (x2 + 4 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (1 + 4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (4 + y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (8 + y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (3 + 4 * x2 + 16 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (12 + y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp3 = tmp1 * tmp2
tmp6 = tmp4 * tmp5
tmp7 = tmp3 + tmp6
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp14 = tmp12 * tmp13
tmp15 = tmp11 + tmp14
tmp16 = tmp0 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (x2 + 4 * y3), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_8(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask,
        eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_10(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_11(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_12(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_10, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_11, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1))
assert_size_stride(primals_12, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4, 4), (4, 1))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (4, 4), (4, 1))
assert_size_stride(primals_18, (4,), (1,))
assert_size_stride(primals_19, (4,), (1,))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf1)
del primals_5
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf2)
del primals_7
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 4)](buf1, primals_6, buf3, 16, 4,
XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, primals_4,
buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_3
del primals_4
buf5 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (16, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_eq_2[grid(64)](primals_12, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_12
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
        triton_poi_fused_div_masked_fill_3[grid(16, 16)](buf7, buf6, buf1,
            primals_6, primals_10, 16, 16, XBLOCK=16, YBLOCK=16,
            num_warps=4, num_stages=1)
del primals_6
buf8 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_max_4[grid(1)](buf7, buf8, 1, 256, num_warps=2,
num_stages=1)
buf9 = reinterpret_tensor(buf1, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf1
buf10 = reinterpret_tensor(buf0, (4, 4, 4, 1), (16, 4, 1, 64), 0)
del buf0
triton_poi_fused__softmax_sub_5[grid(64)](buf7, buf8, buf9, buf10,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf27 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
        triton_poi_fused__softmax_eq_isnan_logical_and_logical_or_sub_6[
            grid(256)](buf7, buf8, buf9, buf10, buf11, buf27, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
del buf7
del buf8
buf12 = reinterpret_tensor(buf9, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf9
triton_poi_fused_clone_1[grid(16, 4)](buf2, primals_8, primals_9,
buf12, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
del primals_8
del primals_9
buf13 = reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf12, (16, 4, 1), (4, 1, 0), 0), out=buf13)
buf14 = reinterpret_tensor(buf13, (4, 4, 4, 1), (16, 4, 1, 1), 0)
del buf13
triton_poi_fused_add_mul_sum_7[grid(16, 4)](buf14, buf11,
primals_11, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1)
buf15 = reinterpret_tensor(buf10, (4, 4, 4), (16, 4, 1), 0)
del buf10
triton_poi_fused_add_native_layer_norm_8[grid(16, 4)](buf14,
primals_1, buf15, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1,
num_stages=1)
buf16 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(16)](buf15, buf16, buf17,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf18 = reinterpret_tensor(buf14, (4, 4, 4), (16, 4, 1), 0)
del buf14
triton_poi_fused_native_layer_norm_10[grid(64)](buf15, buf16, buf17,
primals_13, primals_14, buf18, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del primals_14
buf19 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_15, (4, 4), (1, 4), 0), out=buf19)
buf20 = reinterpret_tensor(buf19, (4, 4, 4), (16, 4, 1), 0)
del buf19
buf26 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_11[grid(64)](buf20,
primals_16, buf26, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_16
buf21 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_17, (4, 4), (1, 4), 0), out=buf21)
buf22 = reinterpret_tensor(buf21, (4, 4, 4), (16, 4, 1), 0)
del buf21
triton_poi_fused_add_12[grid(64)](buf22, primals_18, buf18, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_18
buf23 = buf17
del buf17
buf24 = buf16
del buf16
triton_poi_fused_native_layer_norm_9[grid(16)](buf22, buf23, buf24,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf25 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_10[grid(64)](buf22, buf23, buf24,
primals_19, primals_20, buf25, 64, XBLOCK=64, num_warps=1,
num_stages=1)
del buf23
del buf24
del primals_20
return (buf25, primals_10, primals_11, primals_13, primals_19,
reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf6, buf11,
buf15, reinterpret_tensor(buf18, (16, 4), (4, 1), 0),
reinterpret_tensor(buf20, (16, 4), (4, 1), 0), buf22, primals_17,
buf26, primals_15, reinterpret_tensor(buf12, (16, 1, 4), (4, 1, 1),
0), buf27, reinterpret_tensor(buf3, (16, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (16, 4, 1), (4, 1, 4), 0))
class TimeIntervalMultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It also needs position and interaction (time interval) key/value input.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
bs, seq_len = k.size(0), k.size(1)
k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
if not self.kq_same:
q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
else:
q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_k = inter_k.transpose(2, 3).transpose(1, 2)
inter_v = inter_v.transpose(2, 3).transpose(1, 2)
output = self.scaled_dot_product_attention(q, k, v, inter_k,
inter_v, self.d_k, mask)
output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
Involve pair interaction embeddings when calculating attention scores and output
"""
scores = torch.matmul(q, k.transpose(-2, -1))
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v)
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
class TimeIntervalTransformerLayerNew(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model,
n_heads, kq_same=kq_same)
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5):
primals_2 = self.masked_attn_head.v_linear.weight
primals_3 = self.masked_attn_head.v_linear.bias
primals_5 = self.masked_attn_head.k_linear.weight
primals_6 = self.masked_attn_head.k_linear.bias
primals_7 = self.masked_attn_head.q_linear.weight
primals_8 = self.masked_attn_head.q_linear.bias
primals_13 = self.layer_norm1.weight
primals_14 = self.layer_norm1.bias
primals_15 = self.linear1.weight
primals_16 = self.linear1.bias
primals_17 = self.linear2.weight
primals_18 = self.linear2.bias
primals_19 = self.layer_norm2.weight
primals_20 = self.layer_norm2.bias
primals_1 = input_0
primals_4 = input_1
primals_9 = input_2
primals_10 = input_3
primals_11 = input_4
primals_12 = input_5
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
| Yingting-dev/ReChorus | TimeIntervalTransformerLayer | false | 2,997 | [
"MIT"
] | 0 | a16bc1e42f3e90e889133d7476c52ada44db573b | https://github.com/Yingting-dev/ReChorus/tree/a16bc1e42f3e90e889133d7476c52ada44db573b | import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class TimeIntervalMultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It also needs position and interaction (time interval) key/value input.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
def forward(self, q, k, v, pos_k, pos_v, inter_k, inter_v, mask):
bs, seq_len = k.size(0), k.size(1)
k = (self.k_linear(k) + pos_k).view(bs, seq_len, self.h, self.d_k)
if not self.kq_same:
q = self.q_linear(q).view(bs, seq_len, self.h, self.d_k)
else:
q = self.k_linear(q).view(bs, seq_len, self.h, self.d_k)
v = (self.v_linear(v) + pos_v).view(bs, seq_len, self.h, self.d_k)
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
inter_k = inter_k.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_v = inter_v.view(bs, seq_len, seq_len, self.h, self.d_k)
inter_k = inter_k.transpose(2, 3).transpose(1, 2)
inter_v = inter_v.transpose(2, 3).transpose(1, 2)
output = self.scaled_dot_product_attention(q, k, v, inter_k,
inter_v, self.d_k, mask)
output = output.transpose(1, 2).reshape(bs, -1, self.d_model)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, inter_k, inter_v, d_k, mask):
"""
Involve pair interaction embeddings when calculating attention scores and output
"""
scores = torch.matmul(q, k.transpose(-2, -1))
scores += (q[:, :, :, None, :] * inter_k).sum(-1)
scores = scores / d_k ** 0.5
scores.masked_fill_(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
output = torch.matmul(scores, v)
output += (scores[:, :, :, :, None] * inter_v).sum(-2)
return output
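# The static method above augments scaled dot-product attention with pairwise
# interaction (time-interval) embeddings: `inter_k` adds a per-(query i, key j)
# term to the attention scores and `inter_v` adds a per-(i, j) term to the
# aggregated values. Assuming batch size bs, h heads, sequence length L and head
# dimension d_k, the expected shapes are q/k/v: [bs, h, L, d_k],
# inter_k/inter_v: [bs, h, L, L, d_k], and a mask broadcastable to [bs, h, L, L].
# The helper below is a hypothetical shape check of that contract.
def _time_interval_attention_shape_demo(bs=2, h=4, L=5, d_k=3):
    q = torch.randn(bs, h, L, d_k)
    k = torch.randn(bs, h, L, d_k)
    v = torch.randn(bs, h, L, d_k)
    inter_k = torch.randn(bs, h, L, L, d_k)
    inter_v = torch.randn(bs, h, L, L, d_k)
    mask = torch.ones(L, L)  # all ones, so nothing is masked out
    out = TimeIntervalMultiHeadAttention.scaled_dot_product_attention(
        q, k, v, inter_k, inter_v, d_k, mask)
    assert out.shape == (bs, h, L, d_k)
    return out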
class Model(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
self.masked_attn_head = TimeIntervalMultiHeadAttention(d_model,
n_heads, kq_same=kq_same)
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, seq, pos_k, pos_v, inter_k, inter_v, mask):
context = self.masked_attn_head(seq, seq, seq, pos_k, pos_v,
inter_k, inter_v, mask)
context = self.layer_norm1(self.dropout1(context) + seq)
output = self.linear1(context).relu()
output = self.linear2(output)
output = self.layer_norm2(self.dropout2(output) + context)
return output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4,
4]), torch.rand([4, 4, 4, 4, 1]), torch.rand([4, 4, 4, 4, 1]),
torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
|
GeneralizedMeanPooling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xs/cxsstbq3wbw7zaisbdiefnedbcsbrsh6zg7qkoguxoj7qqghnei2.py
# Topologically Sorted Source Nodes: [clamp, x, adaptive_avg_pool2d, pow_2], Original ATen: [aten.clamp, aten.pow, aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool2d => mean
# clamp => clamp_min
# pow_2 => pow_2
# x => pow_1
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-06), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%clamp_min, 4.0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1, -2], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%mean, 0.25), kwargs = {})
triton_per_fused_clamp_mean_pow_0 = async_compile.triton('triton_per_fused_clamp_mean_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_mean_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 16.0
tmp10 = tmp8 / tmp9
tmp11 = 0.25
tmp12 = libdevice.pow(tmp10, tmp11)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
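# In the kernel above, each of the 16 x-indices corresponds to one (batch, channel)
# pair and the 16 r-indices to its 4x4 spatial positions. Since p = 4.0 is a
# compile-time constant, x**4 is computed by two squarings (tmp3 = tmp2 * tmp2,
# tmp4 = tmp3 * tmp3) after clamping to eps = 1e-06, and the final 1/p root is
# taken with libdevice.pow(mean, 0.25).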
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [clamp, x, adaptive_avg_pool2d, pow_2], Original ATen: [aten.clamp, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_mean_pow_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.transforms import *
from torch import nn
class GeneralizedMeanPooling(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-06):
super(GeneralizedMeanPooling, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size
).pow(1.0 / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
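# GeneralizedMeanPooling therefore computes pow(mean(pow(clamp(x, eps), p)), 1/p)
# per channel (adaptive_avg_pool2d with output_size=1 averages over the spatial
# positions). As the docstring notes, p = 1 recovers average pooling and large p
# approaches max pooling. The function below is a hypothetical sanity check of
# those two limits.
def _gem_pooling_limits_demo():
    x = torch.rand(2, 3, 4, 4) + 0.1  # keep values well above eps
    avg_like = GeneralizedMeanPooling(norm=1)(x)
    assert torch.allclose(avg_like, x.mean(dim=(-1, -2), keepdim=True), atol=1e-5)
    max_like = GeneralizedMeanPooling(norm=128)(x)
    assert torch.allclose(max_like, x.amax(dim=(-1, -2), keepdim=True), atol=0.05)
    return avg_like, max_like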
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'norm': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torchvision.transforms import *
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(xmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 16.0
tmp10 = tmp8 / tmp9
tmp11 = 0.25
tmp12 = libdevice.pow(tmp10, tmp11)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_clamp_mean_pow_0[grid(16)](buf1, arg0_1, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class GeneralizedMeanPoolingNew(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-06):
super(GeneralizedMeanPoolingNew, self).__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ZoRoronoa/Camera-Aware-Proxy | GeneralizedMeanPooling | false | 2,998 | [
"Apache-2.0"
] | 0 | 352f900bbae330f18c2bfe2b3f2516fb4e31adea | https://github.com/ZoRoronoa/Camera-Aware-Proxy/tree/352f900bbae330f18c2bfe2b3f2516fb4e31adea | import torch
from torchvision.transforms import *
from torch import nn
class Model(nn.Module):
"""Applies a 2D power-average adaptive pooling over an input signal composed of several input planes.
The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)`
- At p = infinity, one gets Max Pooling
- At p = 1, one gets Average Pooling
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
"""
def __init__(self, norm, output_size=1, eps=1e-06):
super().__init__()
assert norm > 0
self.p = float(norm)
self.output_size = output_size
self.eps = eps
def forward(self, x):
x = x.clamp(min=self.eps).pow(self.p)
return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size
).pow(1.0 / self.p)
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.p
) + ', ' + 'output_size=' + str(self.output_size) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SpatialGather_Module | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/iv/civr7hz7pwb7nd5q352sqsjvxezkx6m6jnyztaygkt2ugewh5ejx.py
# Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# probs_1 => div, exp, sum_1
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [2], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 1), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_per_fused__softmax_0 = async_compile.triton('triton_per_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + (16*x0)), tmp14, xmask)
''', device_str='cuda')
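# The kernel above computes a numerically stable softmax over the 16 flattened
# spatial positions of each of the 16 (batch, class) rows: the logits are scaled
# by 1.0, the row maximum is subtracted, the result is exponentiated and divided
# by the row sum. The extern_kernels.bmm call in `call` then multiplies these
# weights with the reinterpreted feature tensor to produce the gathered context.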
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_0.run(arg0_1, buf2, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [probs_1, ocr_context], Original ATen: [aten._softmax, aten.bmm]
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64, 1, 16), 0), out=buf3)
del arg1_1
del buf2
return (reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils
class SpatialGather_Module(nn.Module):
"""
Aggregate the context features according to the initial
predicted probability distribution.
Employ the soft-weighted method to aggregate the context.
Output:
The correlation of every class map with every feature map
shape = [n, num_feats, num_classes, 1]
"""
def __init__(self, cls_num=0, scale=1):
super(SpatialGather_Module, self).__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, feats, probs):
batch_size, c, _, _ = probs.size(0), probs.size(1), probs.size(2
), probs.size(3)
probs = probs.view(batch_size, c, -1)
feats = feats.view(batch_size, feats.size(1), -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).unsqueeze(3)
return ocr_context
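# For every class, the module forms softmax weights over all h * w positions from
# `probs` and takes the correspondingly weighted average of the pixel features in
# `feats`, yielding one aggregated feature vector per class. With feats of shape
# [n, num_feats, h, w] and probs of shape [n, num_classes, h, w] the result has
# shape [n, num_feats, num_classes, 1]. The function below is a hypothetical
# shape check.
def _spatial_gather_shape_demo():
    n, num_feats, num_classes, h, w = 2, 8, 5, 7, 7
    feats = torch.randn(n, num_feats, h, w)
    probs = torch.randn(n, num_classes, h, w)
    ocr_context = SpatialGather_Module()(feats, probs)
    assert ocr_context.shape == (n, num_feats, num_classes, 1)
    return ocr_context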
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch._utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12 = tl.where(xmask, tmp10, 0)
tmp13 = tl.sum(tmp12, 1)[:, None]
tmp14 = tmp9 / tmp13
tl.store(out_ptr2 + (r1 + 16 * x0), tmp14, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_0[grid(16)](arg0_1, buf2, 16, 16, XBLOCK=
8, num_warps=2, num_stages=1)
del arg0_1
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf2, reinterpret_tensor(arg1_1, (4, 16, 4), (64,
1, 16), 0), out=buf3)
del arg1_1
del buf2
return reinterpret_tensor(buf3, (4, 4, 4, 1), (16, 1, 4, 1), 0),
class SpatialGather_ModuleNew(nn.Module):
"""
Aggregate the context features according to the initial
predicted probability distribution.
Employ the soft-weighted method to aggregate the context.
Output:
The correlation of every class map with every feature map
shape = [n, num_feats, num_classes, 1]
"""
def __init__(self, cls_num=0, scale=1):
super(SpatialGather_ModuleNew, self).__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| Zhoushanglin100/Cityscape-model | SpatialGather_Module | false | 2,999 | [
"BSD-3-Clause"
] | 0 | 62b3d25712f16f01d951d5168d0f11e3133cd06b | https://github.com/Zhoushanglin100/Cityscape-model/tree/62b3d25712f16f01d951d5168d0f11e3133cd06b | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch._utils
class Model(nn.Module):
"""
Aggregate the context features according to the initial
predicted probability distribution.
Employ the soft-weighted method to aggregate the context.
Output:
The correlation of every class map with every feature map
shape = [n, num_feats, num_classes, 1]
"""
def __init__(self, cls_num=0, scale=1):
super().__init__()
self.cls_num = cls_num
self.scale = scale
def forward(self, feats, probs):
batch_size, c, _, _ = probs.size(0), probs.size(1), probs.size(2
), probs.size(3)
probs = probs.view(batch_size, c, -1)
feats = feats.view(batch_size, feats.size(1), -1)
feats = feats.permute(0, 2, 1)
probs = F.softmax(self.scale * probs, dim=2)
ocr_context = torch.matmul(probs, feats)
ocr_context = ocr_context.permute(0, 2, 1).unsqueeze(3)
return ocr_context
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
ToLongTensor | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ml/cmltiy5hrv2wqhdqt3fezcfhgimkd7dolxjtsc7omtdmbpeu3exe.py
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# tensor => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.float32), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from typing import List
import torch.nn as nn
class ToLongTensor(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super(ToLongTensor, self).__init__()
def forward(self, tokens: 'List[List[int]]') ->Tensor:
return torch.tensor(tokens)
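# torch.tensor infers the dtype from the Python values, and nested lists of ints
# become an int64 (torch.long) tensor, which is why no explicit dtype is needed
# above. A hypothetical usage example:
def _to_long_tensor_demo():
    out = ToLongTensor()([[1, 2, 3], [4, 5, 6]])
    assert out.dtype == torch.long and out.shape == (2, 3)
    return out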
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ToLongTensorNew(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super(ToLongTensorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| ZongHR/text | ToLongTensor | false | 3,000 | [
"BSD-3-Clause"
] | 0 | 856607154be7c784505869f10ae578346868b121 | https://github.com/ZongHR/text/tree/856607154be7c784505869f10ae578346868b121 | import torch
from torch import Tensor
from typing import List
import torch.nn as nn
class Model(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super().__init__()
def forward(self, tokens: 'List[List[int]]') ->Tensor:
return torch.tensor(tokens)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SirenLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/tf/ctf5awjfqwzm4jd56ww7cds6w5n2srw3typs5gf4q4uka7u7nllb.py
# Topologically Sorted Source Nodes: [mul, out_1], Original ATen: [aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# out_1 => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_sin_0 = async_compile.triton('triton_poi_fused_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
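# The kernel above applies the Sine activation elementwise: the w0 scale is folded
# in as the constant tmp1 = 1.0 (the layer was traced with the default w0 = 1.0),
# while the addmm in `call` evaluates the linear layer.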
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, out_1], Original ATen: [aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sin_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
class Sine(nn.Module):
def __init__(self, w0=30.0):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0 * x)
class SirenLayer(nn.Module):
def __init__(self, input_dim, hidden_dim, use_bias=True, w0=1.0,
is_first=False):
super().__init__()
self.layer = nn.Linear(input_dim, hidden_dim, bias=use_bias)
self.activation = Sine(w0)
self.is_first = is_first
self.input_dim = input_dim
self.w0 = w0
self.c = 6
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
dim = self.input_dim
w_std = 1 / dim if self.is_first else math.sqrt(self.c / dim
) / self.w0
self.layer.weight.uniform_(-w_std, w_std)
if self.layer.bias is not None:
self.layer.bias.uniform_(-w_std, w_std)
def forward(self, x):
out = self.layer(x)
out = self.activation(out)
return out
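# SIREN initialization: the first layer draws weights from U(-1/dim, 1/dim) and
# later layers from U(-sqrt(c/dim)/w0, sqrt(c/dim)/w0) with c = 6, while the
# activation is sin(w0 * x). The function below is a hypothetical check of those
# weight bounds.
def _siren_init_bounds_demo():
    first = SirenLayer(input_dim=64, hidden_dim=32, is_first=True)
    assert first.layer.weight.abs().max().item() <= 1 / 64
    hidden = SirenLayer(input_dim=64, hidden_dim=32, w0=30.0)
    assert hidden.layer.weight.abs().max().item() <= math.sqrt(6 / 64) / 30.0
    return first, hidden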
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'hidden_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.sin(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(256)](buf0, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0
class Sine(nn.Module):
def __init__(self, w0=30.0):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0 * x)
class SirenLayerNew(nn.Module):
def __init__(self, input_dim, hidden_dim, use_bias=True, w0=1.0,
is_first=False):
super().__init__()
self.layer = nn.Linear(input_dim, hidden_dim, bias=use_bias)
self.activation = Sine(w0)
self.is_first = is_first
self.input_dim = input_dim
self.w0 = w0
self.c = 6
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
dim = self.input_dim
w_std = 1 / dim if self.is_first else math.sqrt(self.c / dim
) / self.w0
self.layer.weight.uniform_(-w_std, w_std)
if self.layer.bias is not None:
self.layer.bias.uniform_(-w_std, w_std)
def forward(self, input_0):
primals_1 = self.layer.weight
primals_2 = self.layer.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ZixiuHuang/nex-code | SirenLayer | false | 3,001 | [
"MIT"
] | 0 | c9432fb675914391b4de4786220351a0dc35aecb | https://github.com/ZixiuHuang/nex-code/tree/c9432fb675914391b4de4786220351a0dc35aecb | import math
import torch
import torch.nn as nn
class Sine(nn.Module):
def __init__(self, w0=30.0):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0 * x)
class Model(nn.Module):
def __init__(self, input_dim, hidden_dim, use_bias=True, w0=1.0,
is_first=False):
super().__init__()
self.layer = nn.Linear(input_dim, hidden_dim, bias=use_bias)
self.activation = Sine(w0)
self.is_first = is_first
self.input_dim = input_dim
self.w0 = w0
self.c = 6
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
dim = self.input_dim
w_std = 1 / dim if self.is_first else math.sqrt(self.c / dim
) / self.w0
self.layer.weight.uniform_(-w_std, w_std)
if self.layer.bias is not None:
self.layer.bias.uniform_(-w_std, w_std)
def forward(self, x):
out = self.layer(x)
out = self.activation(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Biaffine | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/kq/ckqzjwsdojunslaxexvivgmdizmhm6czh2xpzemjckiggsrasgr3.py
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tl.store(out_ptr0 + (x2 + (16*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [affine], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1), 0), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [contiguous], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf1, buf2, 16, 16, grid=grid(16, 16), stream=stream0)
del buf1
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Callable
from typing import Optional
from torch import nn
class Biaffine(nn.Module):
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', init_func: 'Optional[Callable]'=None) ->None:
super(Biaffine, self).__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.linear_in_features = in1_features
self.linear_out_features = out_features * in2_features
self._linear = nn.Linear(in_features=self.linear_in_features,
out_features=self.linear_out_features)
self.reset_parameters(init_func=init_func)
def reset_parameters(self, init_func: 'Optional[Callable]'=None) ->None:
if init_func:
init_func(self._linear.weight)
def forward(self, input1: 'torch.Tensor', input2: 'torch.Tensor'):
batch_size, len1, _dim1 = input1.size()
batch_size, len2, dim2 = input2.size()
affine = self._linear(input1)
affine = affine.view(batch_size, len1 * self.out_features, dim2)
input2 = torch.transpose(input2, 1, 2)
biaffine = torch.transpose(torch.bmm(affine, input2), 1, 2)
biaffine = biaffine.contiguous().view(batch_size, len2, len1, self.
out_features)
return biaffine
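# The biaffine score for batch b, positions (i, j) and output channel o is the dot
# product of input2[b, j] with (W_o @ input1[b, i] + b_o): the single linear layer
# produces all out_features * in2_features values per position at once, and the
# bmm contracts them against input2. With input1 of shape [b, len1, in1_features]
# and input2 of shape [b, len2, in2_features], the result has shape
# [b, len2, len1, out_features]. The function below is a hypothetical shape check.
def _biaffine_shape_demo():
    b, len1, len2, dim, out = 2, 5, 7, 8, 3
    layer = Biaffine(in1_features=dim, in2_features=dim, out_features=out)
    scores = layer(torch.randn(b, len1, dim), torch.randn(b, len2, dim))
    assert scores.shape == (b, len2, len1, out)
    return scores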
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in1_features': 4, 'in2_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from typing import Callable
from typing import Optional
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tl.store(out_ptr0 + (x2 + 16 * y3), tmp0, xmask & ymask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (16, 4), (4, 1))
assert_size_stride(primals_4, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(primals_1, (16,
4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 16), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_3
del primals_4
buf1 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 16, 4), (64, 4, 1),
0), reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0),
out=buf1)
buf2 = reinterpret_tensor(buf0, (4, 4, 16), (64, 16, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_clone_0[grid(16, 16)](buf1, buf2, 16, 16, XBLOCK=
16, YBLOCK=16, num_warps=4, num_stages=1)
del buf1
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), primals_2
class BiaffineNew(nn.Module):
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', init_func: 'Optional[Callable]'=None) ->None:
super(BiaffineNew, self).__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.linear_in_features = in1_features
self.linear_out_features = out_features * in2_features
self._linear = nn.Linear(in_features=self.linear_in_features,
out_features=self.linear_out_features)
self.reset_parameters(init_func=init_func)
def reset_parameters(self, init_func: 'Optional[Callable]'=None) ->None:
if init_func:
init_func(self._linear.weight)
def forward(self, input_0, input_1):
primals_3 = self._linear.weight
primals_4 = self._linear.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| Zzoay/dependency_representations | Biaffine | false | 3,002 | [
"Apache-2.0"
] | 0 | 7f4726629878aaf9bfee645fe1b11032df05c82e | https://github.com/Zzoay/dependency_representations/tree/7f4726629878aaf9bfee645fe1b11032df05c82e | import torch
from typing import Callable
from typing import Optional
from torch import nn
class Model(nn.Module):
def __init__(self, in1_features: 'int', in2_features: 'int',
out_features: 'int', init_func: 'Optional[Callable]'=None) ->None:
super().__init__()
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.linear_in_features = in1_features
self.linear_out_features = out_features * in2_features
self._linear = nn.Linear(in_features=self.linear_in_features,
out_features=self.linear_out_features)
self.reset_parameters(init_func=init_func)
def reset_parameters(self, init_func: 'Optional[Callable]'=None) ->None:
if init_func:
init_func(self._linear.weight)
def forward(self, input1: 'torch.Tensor', input2: 'torch.Tensor'):
batch_size, len1, _dim1 = input1.size()
batch_size, len2, dim2 = input2.size()
affine = self._linear(input1)
affine = affine.view(batch_size, len1 * self.out_features, dim2)
input2 = torch.transpose(input2, 1, 2)
biaffine = torch.transpose(torch.bmm(affine, input2), 1, 2)
biaffine = biaffine.contiguous().view(batch_size, len2, len1, self.
out_features)
return biaffine
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
RobertaClassificationHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf4, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from typing import Optional
class RobertaClassificationHead(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, features):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_classes': 4, 'input_dim': 4}]
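# Hedged usage sketch (illustrative only; the helper name and shapes are
# assumptions). The head reads the first-token representation from
# (batch, seq_len, hidden) features and projects it to num_classes logits.
def _example_classification_head_usage():
    head = RobertaClassificationHead(num_classes=4, input_dim=4)
    features = torch.rand(2, 5, 4)
    logits = head(features)
    assert logits.shape == (2, 4)
    return logits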
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
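    # Copies the features[:, 0] slice (the first position along dim 1) into a contiguous buffer.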
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
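    # Adds the dense-layer bias, applies ReLU in place, and stores the (activation <= 0) mask for the backward pass.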
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
buf2, (16, 4), (4, 1), 0), primals_4, buf4
class RobertaClassificationHeadNew(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ZongHR/text | RobertaClassificationHead | false | 3,003 | [
"BSD-3-Clause"
] | 0 | 856607154be7c784505869f10ae578346868b121 | https://github.com/ZongHR/text/tree/856607154be7c784505869f10ae578346868b121 | import torch
import torch.nn as nn
from typing import Optional
class Model(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, features):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
RobertaMaskLeanerHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/hu/chudakkfedhuifkewfp7tjye2h2gc3seiwcwdrzqsgqssxvexr33.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten._softmax, aten.add]
# Source node to ATen node mapping:
# x_3 => div, exp, sum_1
# x_4 => add
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, 0.0001), kwargs = {})
triton_per_fused__softmax_add_0 = async_compile.triton('triton_per_fused__softmax_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, float("-inf"))
tmp9 = triton_helpers.max2(tmp8, 1)[:, None]
tmp10 = tmp5 - tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp13 / tmp17
tmp19 = 0.0001
tmp20 = tmp18 + tmp19
tl.store(out_ptr2 + (r1 + (16*x0)), tmp18, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten._softmax, aten.add]
stream0 = get_raw_stream(0)
triton_per_fused__softmax_add_0.run(buf0, primals_2, buf3, buf4, 4, 16, grid=grid(4), stream=stream0)
del buf0
del primals_2
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class RobertaMaskLeanerHead(nn.Module):
"""
Head for mask leaner.
input: (batch, src_lens, embed_dim)
output: (batch, src_lens,1)
"""
def __init__(self, embed_dim):
super().__init__()
self.dense = nn.Linear(embed_dim, 1)
self.scaling = embed_dim ** -0.5
def forward(self, features, **kwargs):
x = self.dense(features)
x = x.view(x.size(0), -1)
x = x * self.scaling
x = F.softmax(x, dim=-1)
x = x + 0.0001
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4}]
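# Hedged usage sketch (illustrative only; the helper name is an assumption).
# The head maps (batch, src_len, embed_dim) features to one weight per token:
# a scaled softmax over the flattened positions plus a small 0.0001 offset.
def _example_mask_leaner_usage():
    head = RobertaMaskLeanerHead(embed_dim=4)
    features = torch.rand(2, 5, 4)
    weights = head(features)
    assert weights.shape == (2, 5)
    return weights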
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__softmax_add_0(in_ptr0, in_ptr1, out_ptr2, out_ptr3,
xnumel, rnumel, XBLOCK: tl.constexpr):
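    # Row-wise softmax of the scaled, bias-added dense output; writes the softmax (out_ptr2) and softmax + 1e-4 (out_ptr3).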
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 1.0
tmp5 = tmp3 * tmp4
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, float('-inf'))
tmp9 = triton_helpers.max2(tmp8, 1)[:, None]
tmp10 = tmp5 - tmp9
tmp11 = 0.5
tmp12 = tmp10 * tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp18 = tmp13 / tmp17
tmp19 = 0.0001
tmp20 = tmp18 + tmp19
tl.store(out_ptr2 + (r1 + 16 * x0), tmp18, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp20, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 4), (4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 1), (1, 4), 0), out=buf0)
del primals_1
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf4 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__softmax_add_0[grid(4)](buf0, primals_2, buf3,
buf4, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del primals_2
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf3
class RobertaMaskLeanerHeadNew(nn.Module):
"""
Head for mask leaner.
input: (batch, src_lens, embed_dim)
output: (batch, src_lens,1)
"""
def __init__(self, embed_dim):
super().__init__()
self.dense = nn.Linear(embed_dim, 1)
self.scaling = embed_dim ** -0.5
def forward(self, input_0):
primals_1 = self.dense.weight
primals_2 = self.dense.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| a1600012888/fairseq | RobertaMaskLeanerHead | false | 3,004 | [
"MIT"
] | 0 | dbd2cd08fc396f919d2e737513095fcb966896c0 | https://github.com/a1600012888/fairseq/tree/dbd2cd08fc396f919d2e737513095fcb966896c0 | import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import torch.onnx.operators
import torch.optim
import torch.optim.lr_scheduler
class Model(nn.Module):
"""
Head for mask leaner.
input: (batch, src_lens, embed_dim)
output: (batch, src_lens,1)
"""
def __init__(self, embed_dim):
super().__init__()
self.dense = nn.Linear(embed_dim, 1)
self.scaling = embed_dim ** -0.5
def forward(self, features, **kwargs):
x = self.dense(features)
x = x.view(x.size(0), -1)
x = x * self.scaling
x = F.softmax(x, dim=-1)
x = x + 0.0001
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
Block | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/pw/cpw5jgywzg5ntkknxkt5orxsrrr5zq7a6eoteboi3ba7zrcxj2p7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 4), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/up/cup7k6lckdxdeumc5wkgvvawwr2i7omafeixxhsjlwoqypnernlr.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add, clone, rsqrt, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x2), tmp8, xmask)
tl.store(out_ptr1 + (x2), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vl/cvl5znscfya2vyloz4woz3qs3yhrnlmrhqdjrm2ups76tnqugnij.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# x_2 => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-06), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {})
triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + (4*y3)), tmp8, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/z6/cz63vnafhvqvexb6jef6mkdhstxrrarf4q5x6w7vrrn5dk6hodcl.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# x_4 => add_2, erf, mul_2, mul_3, mul_4
# Graph fragment:
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_3,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %add_2), kwargs = {})
triton_poi_fused_gelu_3 = async_compile.triton('triton_poi_fused_gelu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rb/crbbxy4r73vzsuemoueepohmvczbngc44c526mksdnqkvidq3k2g.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_8 => add_3
# Graph fragment:
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_3), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + (16*y3)), tmp4, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_4, primals_5, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
del buf2
del buf3
del primals_5
buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.gelu]
triton_poi_fused_gelu_3.run(buf5, buf6, 1024, grid=grid(1024), stream=stream0)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.add]
triton_poi_fused_add_4.run(primals_1, primals_10, buf7, buf8, 16, 16, grid=grid(16, 16), stream=stream0)
return (buf8, primals_1, primals_2, primals_4, primals_10, buf1, reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5, reinterpret_tensor(buf6, (64, 16), (16, 1), 0), buf7, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last'
):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError
        self.normalized_shape = normalized_shape,  # stored as a one-element tuple for F.layer_norm
def forward(self, x):
if self.data_format == 'channels_last':
return F.layer_norm(x, self.normalized_shape, self.weight, self
.bias, self.eps)
elif self.data_format == 'channels_first':
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
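# Hedged sketch (added for illustration; the helper name is an assumption).
# The two data formats should agree when the same tensor is permuted between
# (N, C, H, W) and (N, H, W, C), since both normalize over the channel dim.
def _layer_norm_format_check():
    x = torch.rand(2, 4, 3, 3)
    ln_first = LayerNorm(4, data_format='channels_first')
    ln_last = LayerNorm(4, data_format='channels_last')
    y_first = ln_first(x)
    y_last = ln_last(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    return torch.allclose(y_first, y_last, atol=1e-5)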
class Block(nn.Module):
""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.norm = LayerNorm(dim, eps=1e-06)
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
requires_grad=True) if layer_scale_init_value > 0 else None
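        # NOTE: DropPath (stochastic depth) is assumed to be defined elsewhere,
        # e.g. timm's implementation; with the default drop_path=0.0 the
        # nn.Identity() branch is taken and DropPath is never instantiated.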
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2)
x = input + self.drop_path(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
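# Hedged usage sketch (illustrative only; the helper name is an assumption).
# The block is residual, so it preserves the (N, C, H, W) shape of its input.
def _example_block_usage():
    block = Block(dim=4)
    x = torch.rand(4, 4, 4, 4)
    y = block(x)
    assert y.shape == x.shape
    return y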
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
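    # Adds the depthwise-conv bias (one value per channel) to the convolution output in place.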
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
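    # Computes per-position LayerNorm statistics over the 4 channels: the mean and rsqrt(variance + 1e-6).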
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-06
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x2, tmp8, xmask)
tl.store(out_ptr1 + x2, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.
constexpr):
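    # Applies the LayerNorm affine step: (x - mean) * rsqrt(var + eps) * weight + bias, written in channels-last layout.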
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2 + 4 * y3), tmp8, xmask & ymask)
@triton.jit
def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
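    # Elementwise exact (erf-based) GELU: 0.5 * x * (1 + erf(x / sqrt(2))).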
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
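    # Residual add: out = input + gamma * branch, permuting the branch back from channels-last to (N, C, H, W).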
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2 + 16 * y3), tmp4, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (4, 16), (16, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(256)](buf1, primals_3, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(64)](buf1, buf2, buf3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_2[grid(64, 4)](buf1, buf2, buf3,
primals_4, primals_5, buf4, 64, 4, XBLOCK=4, YBLOCK=32,
num_warps=4, num_stages=1)
del buf2
del buf3
del primals_5
buf5 = empty_strided_cuda((64, 16), (16, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf4, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 16), (1, 4), 0),
alpha=1, beta=1, out=buf5)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.
float32)
triton_poi_fused_gelu_3[grid(1024)](buf5, buf6, 1024, XBLOCK=128,
num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf6, (64, 16),
(16, 1), 0), reinterpret_tensor(primals_8, (16, 4), (1, 16), 0),
alpha=1, beta=1, out=buf7)
del primals_9
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_4[grid(16, 16)](primals_1, primals_10, buf7,
buf8, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
return (buf8, primals_1, primals_2, primals_4, primals_10, buf1,
reinterpret_tensor(buf4, (64, 4), (4, 1), 0), buf5,
reinterpret_tensor(buf6, (64, 16), (16, 1), 0), buf7, primals_8,
primals_6)
class LayerNorm(nn.Module):
""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The data_format argument gives the ordering of the dimensions in the inputs: channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last'
):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError
self.normalized_shape = normalized_shape,
def forward(self, x):
if self.data_format == 'channels_last':
return F.layer_norm(x, self.normalized_shape, self.weight, self
.bias, self.eps)
elif self.data_format == 'channels_first':
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
class BlockNew(nn.Module):
""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.norm = LayerNorm(dim, eps=1e-06)
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
requires_grad=True) if layer_scale_init_value > 0 else None
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, input_0):
        # Bind module parameters to the primals in the order call() consumes them
        # (dwconv bias add, layer-norm weight/bias, pwconv biases, layer scale).
        primals_2 = self.dwconv.weight
        primals_3 = self.dwconv.bias
        primals_4 = self.norm.weight
        primals_5 = self.norm.bias
        primals_6 = self.pwconv1.weight
        primals_7 = self.pwconv1.bias
        primals_8 = self.pwconv2.weight
        primals_9 = self.pwconv2.bias
        primals_10 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| ZhijieXiao-0624/CNXA | Block | false | 3,005 | [
"MIT"
] | 0 | a63b3561010cf87f696a005f8ea252e7cdaa7ca2 | https://github.com/ZhijieXiao-0624/CNXA/tree/a63b3561010cf87f696a005f8ea252e7cdaa7ca2 | import torch
import torch.nn as nn
import torch.nn.functional as F
class LayerNorm(nn.Module):
""" LayerNorm that supports two data formats: channels_last (default) or channels_first.
    The data_format argument gives the ordering of the dimensions in the inputs: channels_last corresponds to inputs with
shape (batch_size, height, width, channels) while channels_first corresponds to inputs
with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-06, data_format='channels_last'
):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ['channels_last', 'channels_first']:
raise NotImplementedError
self.normalized_shape = normalized_shape,
def forward(self, x):
if self.data_format == 'channels_last':
return F.layer_norm(x, self.normalized_shape, self.weight, self
.bias, self.eps)
elif self.data_format == 'channels_first':
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
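# A minimal sketch, not part of the original module and never called in this file: it
# checks that the channels_first branch above matches F.layer_norm applied after
# permuting the tensor to channels_last (helper name is illustrative only).
def _layer_norm_formats_agree(num_channels=4, atol=1e-05):
    x = torch.randn(2, num_channels, 8, 8)
    ln_first = LayerNorm(num_channels, data_format='channels_first')
    ln_last = LayerNorm(num_channels, data_format='channels_last')
    y_first = ln_first(x)
    y_last = ln_last(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
    return torch.allclose(y_first, y_last, atol=atol)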
class Model(nn.Module):
""" ConvNeXt Block. There are two equivalent implementations:
(1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
(2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
We use (2) as we find it slightly faster in PyTorch
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-06):
super().__init__()
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.norm = LayerNorm(dim, eps=1e-06)
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = nn.GELU()
self.pwconv2 = nn.Linear(4 * dim, dim)
self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim),
requires_grad=True) if layer_scale_init_value > 0 else None
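        # DropPath (stochastic depth) is not defined in this file; with the default
        # drop_path=0.0 the nn.Identity() branch below is taken. For drop_path > 0 an
        # implementation such as timm's DropPath is assumed to be importable.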
self.drop_path = DropPath(drop_path
) if drop_path > 0.0 else nn.Identity()
def forward(self, x):
input = x
x = self.dwconv(x)
x = x.permute(0, 2, 3, 1)
x = self.norm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.pwconv2(x)
if self.gamma is not None:
x = self.gamma * x
x = x.permute(0, 3, 1, 2)
x = input + self.drop_path(x)
return x
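# A minimal sketch, illustrative only and never called in this file: with dim=4 (see
# get_init_inputs below) the block is shape-preserving, since the permutes only move
# the channel axis so LayerNorm/Linear act on the last dimension, as described in the
# docstring's implementation (2).
def _block_is_shape_preserving():
    model = Model(4)
    x = torch.rand(4, 4, 4, 4)
    return model(x).shape == x.shape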
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
LinearWeightNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/m6/cm645lheesrjji6wgkstt4nu675ugbbjruised3fke4juyuyosol.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused__weight_norm_interface_0 = async_compile.triton('triton_poi_fused__weight_norm_interface_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dp/cdpmihjazxc2dpfye4tlkemiovtq5jgmt3cquzgrtbm3gn32us7u.py
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
# Source node to ATen node mapping:
# _weight_norm => div, mul
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_poi_fused__weight_norm_interface_1 = async_compile.triton('triton_poi_fused__weight_norm_interface_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__weight_norm_interface_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
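    # Together with the row-norm kernel above, this reproduces nn.utils.weight_norm:
    # weight = weight_v * (weight_g / ||weight_v||), computed per output row (dim=0).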
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
stream0 = get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0.run(primals_2, buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten._weight_norm_interface]
triton_poi_fused__weight_norm_interface_1.run(primals_2, primals_1, buf0, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), buf1, primals_1, primals_2, buf0, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LinearWeightNorm(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features,
            self.linear.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
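# A minimal sketch, illustrative only and never called in this file, of the
# reparameterization introduced by nn.utils.weight_norm and reproduced by the fused
# kernels above: weight = weight_v * (weight_g / ||weight_v||), norm taken per output row.
def _manual_weight_norm_weight(module):
    v = module.linear.weight_v
    g = module.linear.weight_g
    return v * (g / v.norm(dim=1, keepdim=True))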
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__weight_norm_interface_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__weight_norm_interface_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 / tmp2
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 1), (1, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__weight_norm_interface_0[grid(4)](primals_2, buf0,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__weight_norm_interface_1[grid(16)](primals_2,
primals_1, buf0, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64,
4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), buf1, primals_1, primals_2, buf0, reinterpret_tensor(primals_4,
(64, 4), (4, 1), 0)
class LinearWeightNormNew(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super(LinearWeightNormNew, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features,
            self.linear.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input_0):
primals_3 = self.linear.bias
primals_1 = self.linear.weight_g
primals_2 = self.linear.weight_v
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| TRUMANCFY/wolf | LinearWeightNorm | false | 3,006 | [
"Apache-2.0"
] | 0 | 1a21479256e4f51885e2d2fdd449b1faa61277a6 | https://github.com/TRUMANCFY/wolf/tree/1a21479256e4f51885e2d2fdd449b1faa61277a6 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_features, out_features, bias=True):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias=bias)
self.reset_parameters()
def reset_parameters(self):
nn.init.normal_(self.linear.weight, mean=0.0, std=0.05)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
self.linear = nn.utils.weight_norm(self.linear)
def extra_repr(self):
        return 'in_features={}, out_features={}, bias={}'.format(
            self.linear.in_features, self.linear.out_features,
            self.linear.bias is not None)
def init(self, x, init_scale=1.0):
with torch.no_grad():
out = self(x).view(-1, self.linear.out_features)
mean = out.mean(dim=0)
std = out.std(dim=0)
inv_stdv = init_scale / (std + 1e-06)
self.linear.weight_g.mul_(inv_stdv.unsqueeze(1))
if self.linear.bias is not None:
self.linear.bias.add_(-mean).mul_(inv_stdv)
return self(x)
def forward(self, input):
return self.linear(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ConvBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/zv/czvfpj3ah2lefbwpcuw4esv23bxs5a3ab63ply3ntgbsdktepd5v.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
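    # Fuses the conv1 bias add with ReLU, out = max(x + bias[c], 0), applied in place
    # over the (4, 6, 28, 28) output of the first convolution (18816 elements).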
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 1176)
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask)
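    # 2x2, stride-2 max pooling over the 28x28 maps: out_ptr0 receives the window max
    # and out_ptr1 the int8 argmax offset (0-3) backing the indices output of
    # max_pool2d_with_indices.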
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xe/cxelxvpw3asckozc53rh36773aohp5hqpbp2nos5ymcdqhxvo4bl.py
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# relu_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%relu_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, relu_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0)
return (reinterpret_tensor(buf7, (4, 400), (400, 1), 0), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from math import *
import torch.nn.functional as F
class ConvBlock(nn.Module):
def __init__(self):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
return x
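# A minimal sketch, illustrative only and never called in this file, of the shape flow
# that justifies the 16 * 5 * 5 flatten above for the 32x32 inputs from get_inputs():
# (3, 32, 32) -> conv5 -> (6, 28, 28) -> pool -> (6, 14, 14) -> conv5 -> (16, 10, 10)
# -> pool -> (16, 5, 5) -> view -> 400 features per sample.
def _trace_output_shape():
    return ConvBlock()(torch.zeros(1, 3, 32, 32)).shape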
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(18816)](buf1, primals_2,
18816, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(6400)](buf5, primals_5,
6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
return reinterpret_tensor(buf7, (4, 400), (400, 1), 0
), primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6
class ConvBlockNew(nn.Module):
def __init__(self):
super(ConvBlockNew, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| a8252525/NIID-Bench | ConvBlock | false | 3,007 | [
"MIT"
] | 0 | 33df8d3a7b941884eec3c7bd52adb8a9476eb282 | https://github.com/a8252525/NIID-Bench/tree/33df8d3a7b941884eec3c7bd52adb8a9476eb282 | import torch
import torch.nn as nn
from math import *
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return []
|
Unet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/xf/cxfcqgqzkxpafh7ngi5lmyvtgswx4bwrz3upkkjlnp6wzixzpn5k.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
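    # Fuses the conv bias add with LeakyReLU (negative slope 0.01): out_ptr1 receives
    # the activation and out_ptr0 the boolean x > 0 mask, which inductor keeps
    # (typically for the LeakyReLU backward).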
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/av/cavhjnlb3jhzdiam6ww4odoew4b7etx4mfdwllv5macorejth2wz.py
# Topologically Sorted Source Nodes: [x_2, iadd, x_3], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd => add
# x_2 => convolution_1
# x_3 => gt_1, mul_1, where_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_2, %primals_1), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor, %add, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_1 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_1, %slice_scatter_default, 1, 0, 1), kwargs = {})
# %slice_scatter_default_2 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_1, %slice_12, 1, 0, 1), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_2, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_2, 0.01), kwargs = {})
# %where_1 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %slice_scatter_default_2, %mul_1), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_1 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 32
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 131072)
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp3, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x0 + (4096*x2)), tmp2, eviction_policy='evict_last', other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
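    # For channel 0 only, the single-channel network input (in_ptr2) is added to the
    # biased conv output before the LeakyReLU, matching the iadd / slice_scatter nodes
    # in the graph fragment above; all other channels get the plain bias + LeakyReLU.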
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ji/cji5l2esho4uvrao5eeomydgbe3hjk4sdot5hvrdbfwiux6vsdse.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_4 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 32), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/42/c42ryamg2fbchnrdanzxiam3yphdwkdejaxnqprizpnvuejwqtar.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_5 => convolution_3
# x_6 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_3, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_3 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
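
# --- Illustrative sketch (not part of the generated graph; helper name is ours) ---
# triton_poi_fused_convolution_leaky_relu_3 fuses the bias add of convolution_3
# with LeakyReLU(negative_slope=0.01) and writes two outputs: the boolean
# (y > 0) mask (out_ptr0, *i1), kept for reuse in the backward pass, and the
# activated values (out_ptr1, *fp32). A rough eager-mode equivalent:
def _ref_bias_leaky_relu(y, bias, negative_slope=0.01):
    import torch  # local import keeps the sketch self-contained
    y = y + bias.view(1, -1, 1, 1)          # per-channel bias, broadcast over N/H/W
    mask = y > 0                            # corresponds to out_ptr0
    return mask, torch.where(mask, y, y * negative_slope)  # corresponds to out_ptr1
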
# kernel path: runs/run_shard_7/inductor_cache/ag/cageczbpbdq2zvqdke7tpgave3qsnz6g7oqwi32lwbqxmfh6cqgr.py
# Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_1 => add_1
# x_7 => convolution_4
# x_8 => gt_3, mul_3, where_3
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_39, %convolution_2), kwargs = {})
# %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_1, %add_1, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_4 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_4, %slice_scatter_default_3, 1, 0, 32), kwargs = {})
# %slice_scatter_default_5 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_4, %slice_49, 1, 0, 32), kwargs = {})
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_5, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_5, 0.01), kwargs = {})
# %where_3 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %slice_scatter_default_5, %mul_3), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 64
x3 = xindex
x2 = (xindex // 65536)
x4 = xindex % 65536
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 32, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + (32768*x2)), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + (32768*x2)), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
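
# --- Illustrative sketch (not part of the generated graph; helper name is ours) ---
# triton_poi_fused_add_convolution_leaky_relu_4 realizes the slice_scatter chain
# in the graph fragment above: the bias of convolution_4 is added on all 64
# output channels, the skip tensor (convolution_2, 32 channels) is added only
# into channels [0, 32), and LeakyReLU(0.01) plus the (y > 0) mask are computed
# on the result. Roughly, in eager mode:
def _ref_partial_residual_leaky_relu(y, bias, skip, split=32, negative_slope=0.01):
    import torch  # local import keeps the sketch self-contained
    y = y + bias.view(1, -1, 1, 1)          # bias over every output channel
    y[:, :split] = y[:, :split] + skip      # residual only into the first `split` channels
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)
# Kernels _7 and _10 below repeat this pattern with split=64 and split=128.
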
# kernel path: runs/run_shard_7/inductor_cache/wm/cwmx573hgzpqv72fn6et3riktwpbit3or4nztj7ce6dfrczkaddw.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_9 => convolution_5
# Graph fragment:
# %convolution_5 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_3, %primals_12, %primals_13, [2, 2], [0, 0], [1, 1], False, [0, 0], 64), kwargs = {})
triton_poi_fused_convolution_5 = async_compile.triton('triton_poi_fused_convolution_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/3w/c3wcxzpxikyyqdrw6kpbwbywojsljwovzvu54xks2iemn2a2bek6.py
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_10 => convolution_6
# x_11 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_5, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.01), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_6, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uq/cuqzrsdrmc6acrieikbfirzninntl7f3rpxs5iatziafgkhripbr.py
# Topologically Sorted Source Nodes: [x_12, iadd_2, x_13], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_2 => add_2
# x_12 => convolution_7
# x_13 => gt_5, mul_5, where_5
# Graph fragment:
# %convolution_7 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_4, %primals_16, %primals_17, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_76, %convolution_5), kwargs = {})
# %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_2, %add_2, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_7 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_7, %slice_scatter_default_6, 1, 0, 64), kwargs = {})
# %slice_scatter_default_8 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_7, %slice_86, 1, 0, 64), kwargs = {})
# %gt_5 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_8, 0), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_8, 0.01), kwargs = {})
# %where_5 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %slice_scatter_default_8, %mul_5), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_7 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 128
x3 = xindex
x2 = (xindex // 32768)
x4 = xindex % 32768
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 64, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + (16384*x2)), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + (16384*x2)), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/z5/cz5htevaalapzdiv2ghjv26w2a3mj4iej6i77fm4swdcujsblszp.py
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_14 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_5, %primals_18, %primals_19, [2, 2], [0, 0], [1, 1], False, [0, 0], 128), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qm/cqmscszyypzxc4zzgkthiigoojenozafjwojwc52fv6pbxgqrvep.py
# Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_15 => convolution_9
# x_16 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_9 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_8, %primals_20, %primals_21, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_9, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_9, 0.01), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_9, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_9 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pn/cpnmjt3x2nmyv3a3tzn6a3wszlt7xdk4x5wv6ffqsn324kfaib4f.py
# Topologically Sorted Source Nodes: [x_17, iadd_3, x_18], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_3 => add_3
# x_17 => convolution_10
# x_18 => gt_7, mul_7, where_7
# Graph fragment:
# %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_6, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%slice_113, %convolution_8), kwargs = {})
# %slice_scatter_default_9 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_tensor_3, %add_3, 3, 0, 9223372036854775807), kwargs = {})
# %slice_scatter_default_10 : [num_users=2] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%convolution_10, %slice_scatter_default_9, 1, 0, 128), kwargs = {})
# %slice_scatter_default_11 : [num_users=3] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%slice_scatter_default_10, %slice_123, 1, 0, 128), kwargs = {})
# %gt_7 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%slice_scatter_default_11, 0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%slice_scatter_default_11, 0.01), kwargs = {})
# %where_7 : [num_users=3] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %slice_scatter_default_11, %mul_7), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 256
x3 = xindex
x2 = (xindex // 16384)
x4 = xindex % 16384
tmp21 = tl.load(in_ptr0 + (x3), None)
tmp22 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 128, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + (x3), tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + (x1), tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + (8192*x2)), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + (x3), tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + (x1), tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + (8192*x2)), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + (x3), tmp27, None)
tl.store(out_ptr1 + (x3), tmp30, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pn/cpntx5ingplfviuf3scpf2ploxf2cbj6dppd5twxxapjxxzeb2gw.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_19 => convolution_11
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_7, %primals_24, %primals_25, [2, 2], [0, 0], [1, 1], False, [0, 0], 256), kwargs = {})
triton_poi_fused_convolution_11 = async_compile.triton('triton_poi_fused_convolution_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5p/c5pvu73hvxjushlgcdbeq425vx4apnqtyg3qns5wrorg2nbmszzc.py
# Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_20 => convolution_12
# x_21 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%convolution_11, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_12, 0.01), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_12, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_12 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rh/crhinxcz2vvog2xvydnfton7wspkzsg5t3cjjz2xt2wuzhoi354w.py
# Topologically Sorted Source Nodes: [x_22, iadd_4, x_23], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_4 => add_4
# x_22 => convolution_13
# x_23 => gt_9, mul_9, where_9
# Graph fragment:
# %convolution_13 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_8, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_4 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_13, %convolution_11), kwargs = {})
# %gt_9 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_4, 0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_4, 0.01), kwargs = {})
# %where_9 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_9, %add_4, %mul_9), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_13 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
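
# --- Illustrative sketch (not part of the generated graph; helper name is ours) ---
# triton_poi_fused_add_convolution_leaky_relu_13 is the simpler, full-channel
# residual: add_4 = (convolution_13 + bias) + convolution_11, followed by
# LeakyReLU(0.01) and the usual (y > 0) mask. Roughly, in eager mode:
def _ref_residual_leaky_relu(y, bias, skip, negative_slope=0.01):
    import torch  # local import keeps the sketch self-contained
    y = y + bias.view(1, -1, 1, 1) + skip   # residual added over all channels
    mask = y > 0
    return mask, torch.where(mask, y, y * negative_slope)
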
# kernel path: runs/run_shard_7/inductor_cache/xz/cxzdy4wlhiutyuwqhvz5mul3hsest2j4rhjcdy5iicga6ukzem6f.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_25 => cat
# Graph fragment:
# %cat : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_14 = async_compile.triton('triton_poi_fused_cat_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 512
x0 = xindex % 64
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 512, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
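
# --- Illustrative sketch (not part of the generated graph; helper name is ours) ---
# triton_poi_fused_cat_14 materializes cat([convolution_14, where_7], dim=1):
# channels [0, 256) take the bias-added convolution_14 output (in_ptr0 plus
# in_ptr1), and channels [256, 512) are copied from the skip activation
# (in_ptr2). Roughly, in eager mode:
def _ref_bias_add_then_cat(up, bias, skip):
    import torch  # local import keeps the sketch self-contained
    return torch.cat([up + bias.view(1, -1, 1, 1), skip], dim=1)  # channel concat
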
# kernel path: runs/run_shard_7/inductor_cache/zt/cztj2a2rppkblw2uusvgftlbe5t6xd2fvbauq357vhaoyj3caget.py
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_26 => convolution_15
# x_27 => gt_10, mul_10, where_10
# Graph fragment:
# %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_15, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.01), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_15, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_15 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/eq/ceqlawpx2k7y4mlllr6mlufqinhrt5n2lyebv6w73yy753ifmwl4.py
# Topologically Sorted Source Nodes: [x_28, iadd_5, x_29], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_5 => add_5
# x_28 => convolution_16
# x_29 => gt_11, mul_11, where_11
# Graph fragment:
# %convolution_16 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_5 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_16, %slice_181), kwargs = {})
# %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_5, 0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, 0.01), kwargs = {})
# %where_11 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %add_5, %mul_11), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 128
x2 = (xindex // 8192)
x4 = xindex % 8192
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (32768*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
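
# NOTE (illustrative, hedged): triton_poi_fused_add_convolution_leaky_relu_16
# has the same residual plus LeakyReLU shape as _13 above, except its skip term
# is slice_181 (the first 128 channels of the 512-channel cat tensor), which is
# why in_ptr2 is indexed as (x4 + 32768*x2) into the larger per-sample stride.
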
# kernel path: runs/run_shard_7/inductor_cache/si/csi3nwzyblsdh2cv7kre43tug6gju4orzk6cf7c7tldsjdjh4fge.py
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_31 => cat_1
# Graph fragment:
# %cat_1 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_17, %where_5], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 256, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vg/cvgp54mfma7r4duil452q6fabyts4tskbusq47l54yokhacdvrdo.py
# Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_32 => convolution_18
# x_33 => gt_12, mul_12, where_12
# Graph fragment:
# %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_18, 0.01), kwargs = {})
# %where_12 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_12, %convolution_18, %mul_12), kwargs = {})
triton_poi_fused_convolution_leaky_relu_18 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bx/cbxupgpftsatjh5mnu5m25yosvv4kcrmlb3ewe5ehw6h62behogi.py
# Topologically Sorted Source Nodes: [x_34, iadd_6, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_6 => add_6
# x_34 => convolution_19
# x_35 => gt_13, mul_13, where_13
# Graph fragment:
# %convolution_19 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_12, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_6 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_19, %slice_210), kwargs = {})
# %gt_13 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_6, 0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_6, 0.01), kwargs = {})
# %where_13 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %add_6, %mul_13), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_19 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
x2 = (xindex // 16384)
x4 = xindex % 16384
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (65536*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qy/cqyx4ivlwl5e3hnixgpop5rodo77ijdhyu4cykacthp6b6pblqtn.py
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_37 => cat_2
# Graph fragment:
# %cat_2 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_20, %where_3], 1), kwargs = {})
triton_poi_fused_cat_20 = async_compile.triton('triton_poi_fused_cat_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 128
x0 = xindex % 1024
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 128, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
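# Illustrative sketch (not part of the Inductor output): the cat kernel above folds the
# per-channel bias add into the channel concatenation. With hypothetical names, it
# corresponds to the following eager-mode expression (never called in this file):
def _ref_bias_add_then_cat(conv_out, bias, skip):
    return torch.cat([conv_out + bias.view(1, -1, 1, 1), skip], dim=1)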
# kernel path: runs/run_shard_7/inductor_cache/go/cgo77j6qmweqj7pb3bvmfnhkd4xbawaqr62ui6dn7unxpajvsiwi.py
# Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_38 => convolution_21
# x_39 => gt_14, mul_14, where_14
# Graph fragment:
# %convolution_21 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_44, %primals_45, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_21, 0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_21, 0.01), kwargs = {})
# %where_14 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_14, %convolution_21, %mul_14), kwargs = {})
triton_poi_fused_convolution_leaky_relu_21 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_21', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uv/cuvfebza4ywja4afrbp7q4jsfdigha6t7ov3i45g66lzrpeeyydk.py
# Topologically Sorted Source Nodes: [x_40, iadd_7, x_41], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_7 => add_7
# x_40 => convolution_22
# x_41 => gt_15, mul_15, where_15
# Graph fragment:
# %convolution_22 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_14, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_7 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_22, %slice_239), kwargs = {})
# %gt_15 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_7, 0), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.01), kwargs = {})
# %where_15 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %add_7, %mul_15), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
x2 = (xindex // 32768)
x4 = xindex % 32768
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + (131072*x2)), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + (x3), tmp6, None)
tl.store(out_ptr1 + (x3), tmp9, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qm/cqmwkfzmikcapbfklwnx5duzt3m2kkiznvbfkik2i6kizaagzljn.py
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_43 => cat_3
# Graph fragment:
# %cat_3 : [num_users=3] = call_function[target=torch.ops.aten.cat.default](args = ([%convolution_23, %where_1], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 64, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp10, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uh/cuhiatd4b3g4fq4hwiu5kyq5ygtth7buqp4idbziynhcgnc5b6kn.py
# Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# x_44 => convolution_24
# x_45 => gt_16, mul_16, where_16
# Graph fragment:
# %convolution_24 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_50, %primals_51, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_24, 0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_24, 0.01), kwargs = {})
# %where_16 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_16, %convolution_24, %mul_16), kwargs = {})
triton_poi_fused_convolution_leaky_relu_24 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.0
tmp5 = tmp3 > tmp4
tmp6 = 0.01
tmp7 = tmp3 * tmp6
tmp8 = tl.where(tmp5, tmp3, tmp7)
tl.store(out_ptr0 + (x0), tmp5, None)
tl.store(out_ptr1 + (x0), tmp8, None)
''', device_str='cuda')
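# Illustrative sketch (not part of the Inductor output): the last decoder stage has a
# single output channel, so the bias here is a one-element tensor broadcast over the
# whole activation before the LeakyReLU. Hypothetical helper, never called:
def _ref_scalar_bias_leaky_relu(conv_out, bias, negative_slope=0.01):
    pre = conv_out + bias  # bias has shape (1,), so it broadcasts over every element
    mask = pre > 0
    return mask, torch.where(mask, pre, pre * negative_slope)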
# kernel path: runs/run_shard_7/inductor_cache/hm/chmdwzfjerzpipa7qmmz7fdqjhovmyj2quztbhodv7vkpuqk4l6n.py
# Topologically Sorted Source Nodes: [x_46, iadd_8, x_47], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# iadd_8 => add_8
# x_46 => convolution_25
# x_47 => gt_17, mul_17, where_17
# Graph fragment:
# %convolution_25 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%where_16, %primals_52, %primals_53, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_8 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_25, %slice_268), kwargs = {})
# %gt_17 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_8, 0), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_8, 0.01), kwargs = {})
# %where_17 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %add_8, %mul_17), kwargs = {})
triton_poi_fused_add_convolution_leaky_relu_25 = async_compile.triton('triton_poi_fused_add_convolution_leaky_relu_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_leaky_relu_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
x1 = (xindex // 4096)
tmp0 = tl.load(in_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x0 + (262144*x1)), None)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 0.01
tmp9 = tmp5 * tmp8
tmp10 = tl.where(tmp7, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp7, None)
tl.store(out_ptr1 + (x2), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64, ), (1, ))
assert_size_stride(primals_12, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_13, (64, ), (1, ))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_19, (128, ), (1, ))
assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (256, ), (1, ))
assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_23, (256, ), (1, ))
assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_25, (256, ), (1, ))
assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_27, (256, ), (1, ))
assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_29, (256, ), (1, ))
assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (128, ), (1, ))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (64, ), (1, ))
assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_43, (64, ), (1, ))
assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_45, (32, ), (1, ))
assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (32, ), (1, ))
assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_49, (32, ), (1, ))
assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_51, (1, ), (1, ))
assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_53, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_3, buf1, buf2, 524288, grid=grid(524288), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_2, iadd, x_3], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_1.run(buf3, primals_5, primals_1, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf7, primals_7, 131072, grid=grid(131072), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_3.run(buf8, primals_9, buf9, buf10, 262144, grid=grid(262144), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf13 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_7, iadd_1, x_8], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_4.run(buf11, primals_11, buf7, buf12, buf13, 262144, grid=grid(262144), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
triton_poi_fused_convolution_5.run(buf15, primals_13, 65536, grid=grid(65536), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1))
buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf16, primals_15, buf17, buf18, 131072, grid=grid(131072), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf21 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [x_12, iadd_2, x_13], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_7.run(buf19, primals_17, buf15, buf20, buf21, 131072, grid=grid(131072), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(buf23, primals_19, 32768, grid=grid(32768), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_9.run(buf24, primals_21, buf25, buf26, 65536, grid=grid(65536), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [x_17], Original ATen: [aten.convolution]
buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1))
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf29 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [x_17, iadd_3, x_18], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_10.run(buf27, primals_23, buf23, buf28, buf29, 65536, grid=grid(65536), stream=stream0)
del buf27
del primals_23
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1))
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.convolution]
triton_poi_fused_convolution_11.run(buf31, primals_25, 16384, grid=grid(16384), stream=stream0)
del primals_25
# Topologically Sorted Source Nodes: [x_20], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1))
buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_20, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_12.run(buf32, primals_27, buf33, buf34, 16384, grid=grid(16384), stream=stream0)
del primals_27
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1))
buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool)
buf37 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [x_22, iadd_4, x_23], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_13.run(buf35, primals_29, buf31, buf36, buf37, 16384, grid=grid(16384), stream=stream0)
del primals_29
# Topologically Sorted Source Nodes: [x_24], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1))
buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.cat]
triton_poi_fused_cat_14.run(buf38, primals_31, buf29, buf39, 131072, grid=grid(131072), stream=stream0)
del primals_31
# Topologically Sorted Source Nodes: [x_26], Original ATen: [aten.convolution]
buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1))
buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_26, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_15.run(buf40, primals_33, buf41, buf42, 32768, grid=grid(32768), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.convolution]
buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
buf45 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [x_28, iadd_5, x_29], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_16.run(buf43, primals_35, buf39, buf44, buf45, 32768, grid=grid(32768), stream=stream0)
del buf43
del primals_35
# Topologically Sorted Source Nodes: [x_30], Original ATen: [aten.convolution]
buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1))
buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [x_31], Original ATen: [aten.cat]
triton_poi_fused_cat_17.run(buf46, primals_37, buf21, buf47, 262144, grid=grid(262144), stream=stream0)
del primals_37
# Topologically Sorted Source Nodes: [x_32], Original ATen: [aten.convolution]
buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1))
buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16, 1), 0); del buf38 # reuse
# Topologically Sorted Source Nodes: [x_32, x_33], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_18.run(buf48, primals_39, buf49, buf50, 65536, grid=grid(65536), stream=stream0)
del primals_39
# Topologically Sorted Source Nodes: [x_34], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1))
buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
buf53 = buf48; del buf48 # reuse
# Topologically Sorted Source Nodes: [x_34, iadd_6, x_35], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_19.run(buf51, primals_41, buf47, buf52, buf53, 65536, grid=grid(65536), stream=stream0)
del buf51
del primals_41
# Topologically Sorted Source Nodes: [x_36], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [x_37], Original ATen: [aten.cat]
triton_poi_fused_cat_20.run(buf54, primals_43, buf13, buf55, 524288, grid=grid(524288), stream=stream0)
del buf54
del primals_43
# Topologically Sorted Source Nodes: [x_38], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), (32768, 1024, 32, 1), 0); del buf46 # reuse
# Topologically Sorted Source Nodes: [x_38, x_39], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_21.run(buf56, primals_45, buf57, buf58, 131072, grid=grid(131072), stream=stream0)
del primals_45
# Topologically Sorted Source Nodes: [x_40], Original ATen: [aten.convolution]
buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.bool)
buf61 = buf56; del buf56 # reuse
# Topologically Sorted Source Nodes: [x_40, iadd_7, x_41], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_22.run(buf59, primals_47, buf55, buf60, buf61, 131072, grid=grid(131072), stream=stream0)
del buf59
del primals_47
# Topologically Sorted Source Nodes: [x_42], Original ATen: [aten.convolution]
buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_43], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf62, primals_49, buf5, buf63, 1048576, grid=grid(1048576), stream=stream0)
del buf62
del primals_49
# Topologically Sorted Source Nodes: [x_44], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool)
buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64, 1), 0); del buf35 # reuse
# Topologically Sorted Source Nodes: [x_44, x_45], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_24.run(buf64, primals_51, buf65, buf66, 16384, grid=grid(16384), stream=stream0)
del primals_51
# Topologically Sorted Source Nodes: [x_46], Original ATen: [aten.convolution]
buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1), torch.bool)
buf69 = buf64; del buf64 # reuse
# Topologically Sorted Source Nodes: [x_46, iadd_8, x_47], Original ATen: [aten.convolution, aten.add, aten.leaky_relu]
triton_poi_fused_add_convolution_leaky_relu_25.run(buf67, primals_53, buf63, buf68, buf69, 16384, grid=grid(16384), stream=stream0)
del buf67
del primals_53
return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, primals_48, primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10, buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25, buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39, buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53, buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68, )
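# Note (added for orientation, not generated code): the first element of the tuple above,
# buf69, is the network output; the remaining entries appear to be the inputs and
# intermediates Inductor keeps alive for a potential backward pass. A minimal hand-driven
# invocation (an assumption, mirroring benchmark_compiled_module below) would look like:
#   out, *saved_for_backward = call([primals_1, ..., primals_53])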
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((64, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((256, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((128, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((64, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((64, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((32, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((32, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((1, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels, dropout=False, norm='batch',
            residual=True, activation='leakyrelu', transpose=False):
super(ConvBlock, self).__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'mixed':
self.norm1 = nn.BatchNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
            self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                padding=1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU()
self.actfun2 = nn.ReLU()
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU()
self.actfun2 = nn.LeakyReLU()
elif self.activation == 'elu':
self.actfun1 = nn.ELU()
self.actfun2 = nn.ELU()
elif self.activation == 'selu':
self.actfun1 = nn.SELU()
self.actfun2 = nn.SELU()
def forward(self, x):
ox = x
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
if self.norm1:
x = self.norm1(x)
x = self.actfun1(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.norm2:
x = self.norm2(x)
if self.residual:
            x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox.shape[1], x.shape[1]), :, :]
x = self.actfun2(x)
return x
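# Hypothetical usage sketch (added for illustration; not referenced anywhere else).
# ConvBlock's third and fourth positional parameters are `dropout` and `norm`, so
# keyword arguments are the least error-prone way to configure it.
def _example_convblock_usage():
    block = ConvBlock(1, 32, dropout=False, norm='none', residual=True,
        activation='leakyrelu')
    x = torch.rand(4, 1, 64, 64)
    return block(x).shape  # -> torch.Size([4, 32, 64, 64])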
class Unet(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1, residual=False,
down='conv', up='tconv', activation='selu'):
super(Unet, self).__init__()
self.residual = residual
if down == 'maxpool':
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
elif down == 'avgpool':
self.down1 = nn.AvgPool2d(kernel_size=2)
self.down2 = nn.AvgPool2d(kernel_size=2)
self.down3 = nn.AvgPool2d(kernel_size=2)
self.down4 = nn.AvgPool2d(kernel_size=2)
elif down == 'conv':
self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64)
self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25
self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25
self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25
self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25
self.down1.bias.data = 0.01 * self.down1.bias.data + 0
self.down2.bias.data = 0.01 * self.down2.bias.data + 0
self.down3.bias.data = 0.01 * self.down3.bias.data + 0
self.down4.bias.data = 0.01 * self.down4.bias.data + 0
if up == 'bilinear' or up == 'nearest':
self.up1 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up2 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up3 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up4 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
elif up == 'tconv':
self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2,
groups=64)
self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2,
groups=32)
self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25
self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25
self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25
self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25
self.up1.bias.data = 0.01 * self.up1.bias.data + 0
self.up2.bias.data = 0.01 * self.up2.bias.data + 0
self.up3.bias.data = 0.01 * self.up3.bias.data + 0
self.up4.bias.data = 0.01 * self.up4.bias.data + 0
self.conv1 = ConvBlock(n_channel_in, 32, residual, activation)
self.conv2 = ConvBlock(32, 64, residual, activation)
self.conv3 = ConvBlock(64, 128, residual, activation)
self.conv4 = ConvBlock(128, 256, residual, activation)
self.conv5 = ConvBlock(256, 256, residual, activation)
self.conv6 = ConvBlock(2 * 256, 128, residual, activation)
self.conv7 = ConvBlock(2 * 128, 64, residual, activation)
self.conv8 = ConvBlock(2 * 64, 32, residual, activation)
self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation)
if self.residual:
self.convres = ConvBlock(n_channel_in, n_channel_out, residual,
activation)
def forward(self, x):
c0 = x
c1 = self.conv1(x)
x = self.down1(c1)
c2 = self.conv2(x)
x = self.down2(c2)
c3 = self.conv3(x)
x = self.down3(c3)
c4 = self.conv4(x)
x = self.down4(c4)
x = self.conv5(x)
x = self.up1(x)
x = torch.cat([x, c4], 1)
x = self.conv6(x)
x = self.up2(x)
x = torch.cat([x, c3], 1)
x = self.conv7(x)
x = self.up3(x)
x = torch.cat([x, c2], 1)
x = self.conv8(x)
x = self.up4(x)
x = torch.cat([x, c1], 1)
x = self.conv9(x)
if self.residual:
x = torch.add(x, self.convres(c0))
return x
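# Hypothetical usage sketch (for illustration only; get_inputs()/get_init_inputs() below
# remain the canonical entry points): the default Unet preserves the spatial size, so a
# (4, 1, 64, 64) input comes back as a (4, 1, 64, 64) output.
def _example_unet_forward():
    model = Unet()
    x = torch.rand(4, 1, 64, 64)
    return model(x).shape  # -> torch.Size([4, 1, 64, 64])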
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 32
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 131072
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp3, eviction_policy=
'evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp2, eviction_policy=
'evict_last', other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
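# Illustrative sketch (an assumption, not part of the generated code): the kernel above
# fuses the per-channel bias add, the channel-sliced residual add from ConvBlock.forward
# (only the first min(in, out) channels receive the skip), and the LeakyReLU. With
# hypothetical names it behaves like the following eager-mode helper (never called):
def _ref_sliced_residual_leaky_relu(conv_out, bias, skip, n_res=1, negative_slope=0.01):
    pre = conv_out + bias.view(1, -1, 1, 1)
    pre[:, :n_res] += skip[:, :n_res]
    mask = pre > 0
    return mask, torch.where(mask, pre, pre * negative_slope)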
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_4(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 64
x3 = xindex
x2 = xindex // 65536
x4 = xindex % 65536
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 32, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + 32768 * x2), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
@triton.jit
def triton_poi_fused_convolution_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 128
x3 = xindex
x2 = xindex // 32768
x4 = xindex % 32768
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 64, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
    tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + 16384 * x2), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_9(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_10(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 256
x3 = xindex
x2 = xindex // 16384
x4 = xindex % 16384
tmp21 = tl.load(in_ptr0 + x3, None)
tmp22 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp0 = x1
tmp1 = tl.full([1], 128, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tmp2 & tmp2
tmp4 = tl.load(in_ptr0 + x3, tmp3, other=0.0)
tmp5 = tl.load(in_ptr1 + x1, tmp3, eviction_policy='evict_last', other=0.0)
tmp6 = tmp4 + tmp5
tmp7 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp3, other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp3, tmp8, tmp9)
tmp11 = tl.load(in_ptr0 + x3, tmp2, other=0.0)
tmp12 = tl.load(in_ptr1 + x1, tmp2, eviction_policy='evict_last', other=0.0
)
tmp13 = tmp11 + tmp12
tmp14 = tl.where(tmp2, tmp10, tmp13)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp2, tmp14, tmp15)
tmp17 = tl.load(in_ptr2 + (x4 + 8192 * x2), tmp2, other=0.0)
tmp18 = tmp13 + tmp17
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp2, tmp18, tmp19)
tmp23 = tmp21 + tmp22
tmp24 = tl.where(tmp2, tmp20, tmp23)
tmp25 = tl.where(tmp2, tmp16, tmp24)
tmp26 = 0.0
tmp27 = tmp25 > tmp26
tmp28 = 0.01
tmp29 = tmp25 * tmp28
tmp30 = tl.where(tmp27, tmp25, tmp29)
tl.store(out_ptr0 + x3, tmp27, None)
tl.store(out_ptr1 + x3, tmp30, None)
@triton.jit
def triton_poi_fused_convolution_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_12(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_13(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_14(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 512
x0 = xindex % 64
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
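# Hedged sketch (illustrative name, not generated code): the fused cat kernels
# (triton_poi_fused_cat_14/17/20/23) add the transposed-convolution bias to the
# upsampled tensor and concatenate it with the matching encoder feature map along
# the channel dimension.
def _bias_cat_reference(up, bias, skip):
    return torch.cat([up + bias.view(1, -1, 1, 1), skip], dim=1)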
@triton.jit
def triton_poi_fused_convolution_leaky_relu_15(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_16(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 128
x2 = xindex // 8192
x4 = xindex % 8192
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 32768 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_18(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_19(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
x2 = xindex // 16384
x4 = xindex % 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 65536 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_20(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 128
x0 = xindex % 1024
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_21(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_22(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
x2 = xindex // 32768
x4 = xindex % 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x4 + 131072 * x2), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 > tmp5
tmp7 = 0.01
tmp8 = tmp4 * tmp7
tmp9 = tl.where(tmp6, tmp4, tmp8)
tl.store(out_ptr0 + x3, tmp6, None)
tl.store(out_ptr1 + x3, tmp9, None)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp13 = tl.load(in_ptr2 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp10,
other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x3, tmp14, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_24(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = 0.0
tmp5 = tmp3 > tmp4
tmp6 = 0.01
tmp7 = tmp3 * tmp6
tmp8 = tl.where(tmp5, tmp3, tmp7)
tl.store(out_ptr0 + x0, tmp5, None)
tl.store(out_ptr1 + x0, tmp8, None)
@triton.jit
def triton_poi_fused_add_convolution_leaky_relu_25(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4096
x1 = xindex // 4096
tmp0 = tl.load(in_ptr0 + x2, None)
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x0 + 262144 * x1), None)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = tmp5 > tmp6
tmp8 = 0.01
tmp9 = tmp5 * tmp8
tmp10 = tl.where(tmp7, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp7, None)
tl.store(out_ptr1 + x2, tmp10, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52, primals_53
) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (64,), (1,))
assert_size_stride(primals_12, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_13, (64,), (1,))
assert_size_stride(primals_14, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_23, (256,), (1,))
assert_size_stride(primals_24, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_25, (256,), (1,))
assert_size_stride(primals_26, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_27, (256,), (1,))
assert_size_stride(primals_28, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_29, (256,), (1,))
assert_size_stride(primals_30, (256, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (128, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (128,), (1,))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (64, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (64, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_43, (64,), (1,))
assert_size_stride(primals_44, (32, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_45, (32,), (1,))
assert_size_stride(primals_46, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (32,), (1,))
assert_size_stride(primals_48, (32, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_49, (32,), (1,))
assert_size_stride(primals_50, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_51, (1,), (1,))
assert_size_stride(primals_52, (1, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_53, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
primals_3, buf1, buf2, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_add_convolution_leaky_relu_1[grid(524288)](buf3,
primals_5, primals_1, buf4, buf5, 524288, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_5
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf6, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_2[grid(131072)](buf7, primals_7,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf10 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_3[grid(262144)](buf8,
primals_9, buf9, buf10, 262144, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_9
buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf12 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf13 = buf8
del buf8
triton_poi_fused_add_convolution_leaky_relu_4[grid(262144)](buf11,
primals_11, buf7, buf12, buf13, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_11
buf14 = extern_kernels.convolution(buf13, primals_12, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf14, (4, 64, 16, 16), (16384, 256, 16, 1))
buf15 = buf14
del buf14
triton_poi_fused_convolution_5[grid(65536)](buf15, primals_13,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_13
buf16 = extern_kernels.convolution(buf15, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 128, 16, 16), (32768, 256, 16, 1))
buf17 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(131072)](buf16,
primals_15, buf17, buf18, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_15
buf19 = extern_kernels.convolution(buf18, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf20 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf21 = buf16
del buf16
triton_poi_fused_add_convolution_leaky_relu_7[grid(131072)](buf19,
primals_17, buf15, buf20, buf21, 131072, XBLOCK=512, num_warps=
8, num_stages=1)
del primals_17
buf22 = extern_kernels.convolution(buf21, primals_18, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf22, (4, 128, 8, 8), (8192, 64, 8, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_8[grid(32768)](buf23, primals_19,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf24 = extern_kernels.convolution(buf23, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf26 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_convolution_leaky_relu_9[grid(65536)](buf24,
primals_21, buf25, buf26, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_21
buf27 = extern_kernels.convolution(buf26, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf27, (4, 256, 8, 8), (16384, 64, 8, 1))
buf28 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf29 = buf24
del buf24
triton_poi_fused_add_convolution_leaky_relu_10[grid(65536)](buf27,
primals_23, buf23, buf28, buf29, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf27
del primals_23
buf30 = extern_kernels.convolution(buf29, primals_24, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf30, (4, 256, 4, 4), (4096, 16, 4, 1))
buf31 = buf30
del buf30
triton_poi_fused_convolution_11[grid(16384)](buf31, primals_25,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf32 = extern_kernels.convolution(buf31, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 256, 4, 4), (4096, 16, 4, 1))
buf33 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
buf34 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_12[grid(16384)](buf32,
primals_27, buf33, buf34, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_27
buf35 = extern_kernels.convolution(buf34, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 256, 4, 4), (4096, 16, 4, 1))
buf36 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.bool
)
buf37 = buf32
del buf32
triton_poi_fused_add_convolution_leaky_relu_13[grid(16384)](buf35,
primals_29, buf31, buf36, buf37, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_29
buf38 = extern_kernels.convolution(buf37, primals_30, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=256, bias=None)
assert_size_stride(buf38, (4, 256, 8, 8), (16384, 64, 8, 1))
buf39 = reinterpret_tensor(buf19, (4, 512, 8, 8), (32768, 64, 8, 1), 0)
del buf19
triton_poi_fused_cat_14[grid(131072)](buf38, primals_31, buf29,
buf39, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_31
buf40 = extern_kernels.convolution(buf39, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf40, (4, 128, 8, 8), (8192, 64, 8, 1))
buf41 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
buf42 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_15[grid(32768)](buf40,
primals_33, buf41, buf42, 32768, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_33
buf43 = extern_kernels.convolution(buf42, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf43, (4, 128, 8, 8), (8192, 64, 8, 1))
buf44 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool
)
buf45 = buf40
del buf40
triton_poi_fused_add_convolution_leaky_relu_16[grid(32768)](buf43,
primals_35, buf39, buf44, buf45, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf43
del primals_35
buf46 = extern_kernels.convolution(buf45, primals_36, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=128, bias=None)
assert_size_stride(buf46, (4, 128, 16, 16), (32768, 256, 16, 1))
buf47 = reinterpret_tensor(buf11, (4, 256, 16, 16), (65536, 256, 16,
1), 0)
del buf11
triton_poi_fused_cat_17[grid(262144)](buf46, primals_37, buf21,
buf47, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del primals_37
buf48 = extern_kernels.convolution(buf47, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf48, (4, 64, 16, 16), (16384, 256, 16, 1))
buf49 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
buf50 = reinterpret_tensor(buf38, (4, 64, 16, 16), (16384, 256, 16,
1), 0)
del buf38
triton_poi_fused_convolution_leaky_relu_18[grid(65536)](buf48,
primals_39, buf49, buf50, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del primals_39
buf51 = extern_kernels.convolution(buf50, primals_40, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 64, 16, 16), (16384, 256, 16, 1))
buf52 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.bool)
buf53 = buf48
del buf48
triton_poi_fused_add_convolution_leaky_relu_19[grid(65536)](buf51,
primals_41, buf47, buf52, buf53, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del buf51
del primals_41
buf54 = extern_kernels.convolution(buf53, primals_42, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=64, bias=None)
assert_size_stride(buf54, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf55 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024,
32, 1), 0)
del buf3
triton_poi_fused_cat_20[grid(524288)](buf54, primals_43, buf13,
buf55, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf54
del primals_43
buf56 = extern_kernels.convolution(buf55, primals_44, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf57 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
buf58 = reinterpret_tensor(buf46, (4, 32, 32, 32), (32768, 1024, 32,
1), 0)
del buf46
triton_poi_fused_convolution_leaky_relu_21[grid(131072)](buf56,
primals_45, buf57, buf58, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_45
buf59 = extern_kernels.convolution(buf58, primals_46, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf59, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf60 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.bool)
buf61 = buf56
del buf56
triton_poi_fused_add_convolution_leaky_relu_22[grid(131072)](buf59,
primals_47, buf55, buf60, buf61, 131072, XBLOCK=1024, num_warps
=4, num_stages=1)
del buf59
del primals_47
buf62 = extern_kernels.convolution(buf61, primals_48, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=32, bias=None)
assert_size_stride(buf62, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf63 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_23[grid(1048576)](buf62, primals_49, buf5,
buf63, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del buf62
del primals_49
buf64 = extern_kernels.convolution(buf63, primals_50, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf65 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
buf66 = reinterpret_tensor(buf35, (4, 1, 64, 64), (4096, 4096, 64,
1), 0)
del buf35
triton_poi_fused_convolution_leaky_relu_24[grid(16384)](buf64,
primals_51, buf65, buf66, 16384, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_51
buf67 = extern_kernels.convolution(buf66, primals_52, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 1, 64, 64), (4096, 4096, 64, 1))
buf68 = empty_strided_cuda((4, 1, 64, 64), (4096, 4096, 64, 1),
torch.bool)
buf69 = buf64
del buf64
triton_poi_fused_add_convolution_leaky_relu_25[grid(16384)](buf67,
primals_53, buf63, buf68, buf69, 16384, XBLOCK=128, num_warps=4,
num_stages=1)
del buf67
del primals_53
return (buf69, primals_1, primals_2, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, primals_48,
primals_50, primals_52, buf1, buf2, buf4, buf5, buf7, buf9, buf10,
buf12, buf13, buf15, buf17, buf18, buf20, buf21, buf23, buf25,
buf26, buf28, buf29, buf31, buf33, buf34, buf36, buf37, buf39,
buf41, buf42, buf44, buf45, buf47, buf49, buf50, buf52, buf53,
buf55, buf57, buf58, buf60, buf61, buf63, buf65, buf66, buf68)
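# Orientation (a hedged reading of the call graph above, not part of the generated
# output): call() realizes a U-Net style forward pass. Roughly, with illustrative names:
#   c1 = conv1(x); c2 = conv2(down1(c1)); c3 = conv3(down2(c2)); c4 = conv4(down3(c3))
#   c5 = conv5(down4(c4))
#   u = conv6(cat(up1(c5), c4)); u = conv7(cat(up2(u), c3))
#   u = conv8(cat(up3(u), c2)); out = conv9(cat(up4(u), c1))   # returned as buf69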
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False, norm=
'batch', residual=True, activation='leakyrelu', transpose=False):
super(ConvBlock, self).__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'mixed':
self.norm1 = nn.BatchNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=
3, padding=1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU()
self.actfun2 = nn.ReLU()
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU()
self.actfun2 = nn.LeakyReLU()
elif self.activation == 'elu':
self.actfun1 = nn.ELU()
self.actfun2 = nn.ELU()
elif self.activation == 'selu':
self.actfun1 = nn.SELU()
self.actfun2 = nn.SELU()
def forward(self, x):
ox = x
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
if self.norm1:
x = self.norm1(x)
x = self.actfun1(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.norm2:
x = self.norm2(x)
if self.residual:
x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox.
shape[1], x.shape[1]), :, :]
x = self.actfun2(x)
return x
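# Hedged usage sketch of ConvBlock (demo only; never invoked by the generated code).
def _convblock_demo():
    block = ConvBlock(32, 64, norm=None, residual=True, activation='leakyrelu')
    x = torch.randn(1, 32, 16, 16)
    # The residual is added over the first min(in_channels, out_channels) channels.
    return block(x)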
class UnetNew(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1, residual=False,
down='conv', up='tconv', activation='selu'):
super(UnetNew, self).__init__()
self.residual = residual
if down == 'maxpool':
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
elif down == 'avgpool':
self.down1 = nn.AvgPool2d(kernel_size=2)
self.down2 = nn.AvgPool2d(kernel_size=2)
self.down3 = nn.AvgPool2d(kernel_size=2)
self.down4 = nn.AvgPool2d(kernel_size=2)
elif down == 'conv':
self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64)
self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25
self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25
self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25
self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25
self.down1.bias.data = 0.01 * self.down1.bias.data + 0
self.down2.bias.data = 0.01 * self.down2.bias.data + 0
self.down3.bias.data = 0.01 * self.down3.bias.data + 0
self.down4.bias.data = 0.01 * self.down4.bias.data + 0
if up == 'bilinear' or up == 'nearest':
self.up1 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up2 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up3 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
self.up4 = lambda x: nn.functional.interpolate(x, mode=up,
scale_factor=2)
elif up == 'tconv':
self.up1 = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.up2 = nn.ConvTranspose2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.up3 = nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2,
groups=64)
self.up4 = nn.ConvTranspose2d(32, 32, kernel_size=2, stride=2,
groups=32)
self.up1.weight.data = 0.01 * self.up1.weight.data + 0.25
self.up2.weight.data = 0.01 * self.up2.weight.data + 0.25
self.up3.weight.data = 0.01 * self.up3.weight.data + 0.25
self.up4.weight.data = 0.01 * self.up4.weight.data + 0.25
self.up1.bias.data = 0.01 * self.up1.bias.data + 0
self.up2.bias.data = 0.01 * self.up2.bias.data + 0
self.up3.bias.data = 0.01 * self.up3.bias.data + 0
self.up4.bias.data = 0.01 * self.up4.bias.data + 0
self.conv1 = ConvBlock(n_channel_in, 32, residual, activation)
self.conv2 = ConvBlock(32, 64, residual, activation)
self.conv3 = ConvBlock(64, 128, residual, activation)
self.conv4 = ConvBlock(128, 256, residual, activation)
self.conv5 = ConvBlock(256, 256, residual, activation)
self.conv6 = ConvBlock(2 * 256, 128, residual, activation)
self.conv7 = ConvBlock(2 * 128, 64, residual, activation)
self.conv8 = ConvBlock(2 * 64, 32, residual, activation)
self.conv9 = ConvBlock(2 * 32, n_channel_out, residual, activation)
if self.residual:
self.convres = ConvBlock(n_channel_in, n_channel_out, residual,
activation)
def forward(self, input_0):
primals_6 = self.down1.weight
primals_3 = self.down1.bias
primals_12 = self.down2.weight
primals_9 = self.down2.bias
primals_18 = self.down3.weight
primals_15 = self.down3.bias
primals_24 = self.down4.weight
primals_21 = self.down4.bias
primals_30 = self.up1.weight
primals_23 = self.up1.bias
primals_36 = self.up2.weight
primals_17 = self.up2.bias
primals_42 = self.up3.weight
primals_11 = self.up3.bias
primals_48 = self.up4.weight
primals_5 = self.up4.bias
primals_2 = self.conv1.conv1.weight
primals_7 = self.conv1.conv1.bias
primals_4 = self.conv1.conv2.weight
primals_45 = self.conv1.conv2.bias
primals_8 = self.conv2.conv1.weight
primals_13 = self.conv2.conv1.bias
primals_10 = self.conv2.conv2.weight
primals_39 = self.conv2.conv2.bias
primals_14 = self.conv3.conv1.weight
primals_19 = self.conv3.conv1.bias
primals_16 = self.conv3.conv2.weight
primals_33 = self.conv3.conv2.bias
primals_20 = self.conv4.conv1.weight
primals_25 = self.conv4.conv1.bias
primals_22 = self.conv4.conv2.weight
primals_27 = self.conv4.conv2.bias
primals_26 = self.conv5.conv1.weight
primals_29 = self.conv5.conv1.bias
primals_28 = self.conv5.conv2.weight
primals_31 = self.conv5.conv2.bias
primals_32 = self.conv6.conv1.weight
primals_35 = self.conv6.conv1.bias
primals_34 = self.conv6.conv2.weight
primals_37 = self.conv6.conv2.bias
primals_38 = self.conv7.conv1.weight
primals_41 = self.conv7.conv1.bias
primals_40 = self.conv7.conv2.weight
primals_43 = self.conv7.conv2.bias
primals_44 = self.conv8.conv1.weight
primals_47 = self.conv8.conv1.bias
primals_46 = self.conv8.conv2.weight
primals_49 = self.conv8.conv2.bias
primals_50 = self.conv9.conv1.weight
primals_51 = self.conv9.conv1.bias
primals_52 = self.conv9.conv2.weight
primals_53 = self.conv9.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53])
return output[0]
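# Hedged usage sketch (demo only): the size asserts in call() correspond to a
# single-channel (4, 1, 64, 64) input batch, and the generated kernels are CUDA-only.
def _unet_demo():
    model = UnetNew().cuda()
    x = torch.randn(4, 1, 64, 64, device='cuda')
    return model(x)  # (4, 1, 64, 64) output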
repo_name: XinweiYu/noise2self | module_name: Unet | synthetic: false | uuid: 3,008 | licenses: ["MIT"] | stars: 0 | sha: 04e0379a67e1cb0c807abd3f8d4fd1666db5a793 | repo_link: https://github.com/XinweiYu/noise2self/tree/04e0379a67e1cb0c807abd3f8d4fd1666db5a793
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, dropout=False, norm=
'batch', residual=True, activation='leakyrelu', transpose=False):
super().__init__()
self.dropout = dropout
self.residual = residual
self.activation = activation
self.transpose = transpose
if self.dropout:
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.05)
self.norm1 = None
self.norm2 = None
if norm == 'batch':
self.norm1 = nn.BatchNorm2d(out_channels)
self.norm2 = nn.BatchNorm2d(out_channels)
elif norm == 'instance':
self.norm1 = nn.InstanceNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
elif norm == 'mixed':
self.norm1 = nn.BatchNorm2d(out_channels, affine=True)
self.norm2 = nn.InstanceNorm2d(out_channels, affine=True)
if self.transpose:
self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
kernel_size=3, padding=1)
self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
kernel_size=3, padding=1)
else:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=
3, padding=1)
if self.activation == 'relu':
self.actfun1 = nn.ReLU()
self.actfun2 = nn.ReLU()
elif self.activation == 'leakyrelu':
self.actfun1 = nn.LeakyReLU()
self.actfun2 = nn.LeakyReLU()
elif self.activation == 'elu':
self.actfun1 = nn.ELU()
self.actfun2 = nn.ELU()
elif self.activation == 'selu':
self.actfun1 = nn.SELU()
self.actfun2 = nn.SELU()
def forward(self, x):
ox = x
x = self.conv1(x)
if self.dropout:
x = self.dropout1(x)
if self.norm1:
x = self.norm1(x)
x = self.actfun1(x)
x = self.conv2(x)
if self.dropout:
x = self.dropout2(x)
if self.norm2:
x = self.norm2(x)
if self.residual:
x[:, 0:min(ox.shape[1], x.shape[1]), :, :] += ox[:, 0:min(ox.
shape[1], x.shape[1]), :, :]
x = self.actfun2(x)
return x
class Model(nn.Module):
def __init__(self, n_channel_in=1, n_channel_out=1, residual=False,
down='conv', up='tconv', activation='selu'):
super().__init__()
self.residual = residual
if down == 'maxpool':
self.down1 = nn.MaxPool2d(kernel_size=2)
self.down2 = nn.MaxPool2d(kernel_size=2)
self.down3 = nn.MaxPool2d(kernel_size=2)
self.down4 = nn.MaxPool2d(kernel_size=2)
elif down == 'avgpool':
self.down1 = nn.AvgPool2d(kernel_size=2)
self.down2 = nn.AvgPool2d(kernel_size=2)
self.down3 = nn.AvgPool2d(kernel_size=2)
self.down4 = nn.AvgPool2d(kernel_size=2)
elif down == 'conv':
self.down1 = nn.Conv2d(32, 32, kernel_size=2, stride=2, groups=32)
self.down2 = nn.Conv2d(64, 64, kernel_size=2, stride=2, groups=64)
self.down3 = nn.Conv2d(128, 128, kernel_size=2, stride=2,
groups=128)
self.down4 = nn.Conv2d(256, 256, kernel_size=2, stride=2,
groups=256)
self.down1.weight.data = 0.01 * self.down1.weight.data + 0.25
self.down2.weight.data = 0.01 * self.down2.weight.data + 0.25
self.down3.weight.data = 0.01 * self.down3.weight.data + 0.25
self.down4.weight.data = 0.01 * self.down4.weight.data + 0.25
self.down1.bias.data = 0.01 * self.down1.bias.data + 0
self.down2.bias.data = 0.01 * self.down2.
# ... truncated (>4000 chars) for memory efficiency |
entry_point: ResNetV2
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/a2/ca2l7bjxfwrklzvcxfa2hnyzqh3p6neak37vi6fkugdhbu26fbpz.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (147*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5b/c5brnjme4e4oybuabwsko4vuljormwjqoawce7jgxo5fbkhzx55r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 12
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = (yindex // 3)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/co/ccosum7u5lx5fx5hf5opofiygxj2ntiq67yo5gfegevmhtkaru4r.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 65536
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = (yindex // 256)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qg/cqg4z653mpzmif22rwtpmv42y4lbkkxhxjqguwoxl3wb6cn5fn7k.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 262144
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (4608*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ot/cotn5a2cqhwvdw4ugt6b2a4jl2ou2mh37mnmwxgwogdqw4kcufhp.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1048576
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = (yindex // 1024)
tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (1024*x2) + (9216*y1)), tmp0, xmask)
''', device_str='cuda')
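# Hedged note (not generated code): triton_poi_fused_0 through triton_poi_fused_4 above
# only repack weights and the input image into a channels-last style layout. For a conv
# weight of shape (out_c, in_c, kH, kW) the store indexing matches the permutation below.
def _weight_to_channels_last_reference(w):
    return w.permute(0, 2, 3, 1).contiguous()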
# kernel path: runs/run_shard_7/inductor_cache/bh/cbh2ag2v7orygb3ziuscjd5uid7vytws6fgziabekhv6dedo4sah.py
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add => add
# sqrt => sqrt
# sub => sub
# var_mean => var_mean
# w => div
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_5 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (147*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp21, xmask)
tl.store(out_ptr1 + (r1 + (147*x0)), tmp23, rmask & xmask)
''', device_str='cuda')
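# Hedged sketch (illustrative name): the fused var_mean/sub/add/sqrt/div kernel above
# standardizes each convolution filter over its (in_c, kH, kW) elements, i.e. the
# StdConv-style weight standardization used by ResNetV2, per the graph fragment.
def _standardize_weight_reference(w, eps=1e-05):
    var, mean = torch.var_mean(w, dim=[1, 2, 3], correction=0, keepdim=True)
    return (w - mean) / torch.sqrt(var + eps)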
# kernel path: runs/run_shard_7/inductor_cache/qd/cqdfjpoxnsihncv4uccxtrnfsf3ql6wvc6wgwpd2fj5i6yyxupol.py
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# input_2 => add_1, rsqrt, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-06), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
triton_red_fused_native_group_norm_6 = async_compile.triton('triton_red_fused_native_group_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = (rindex // 8)
tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (262144*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
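# Reader note (illustrative only): the reduction kernel above computes per-group
# GroupNorm statistics with a chunked Welford accumulation (welford_reduce /
# welford) so mean and M2 stay numerically stable across RBLOCK-sized tiles.
# From the indexing it appears to see a 256-channel, channels-last activation
# with 1,024 spatial positions per image and 32 groups, i.e. 8 * 1,024 = 8,192
# reduced elements per group. A minimal eager sketch of the statistics it
# produces, under that assumed shape, is:
import torch


def _reference_group_norm_stats(x: torch.Tensor, num_groups: int = 32, eps: float = 1e-06):
    """Return (mean, rstd) per (batch, group), matching the kernel's three outputs."""
    n = x.shape[0]
    xg = x.reshape(n, num_groups, -1)  # collapse channels-per-group and spatial dims
    var, mean = torch.var_mean(xg, dim=2, unbiased=False)
    rstd = torch.rsqrt(var + eps)
    return mean, rstd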
# kernel path: runs/run_shard_7/inductor_cache/pj/cpjraohhskikqkpyyhuz5sq72665tvzjie254ptm3wjt4sw363dw.py
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# input_2 => add_2, mul_1
# input_3 => relu
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_2,), kwargs = {})
triton_poi_fused_native_group_norm_relu_7 = async_compile.triton('triton_poi_fused_native_group_norm_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = (xindex // 262144)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
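# Reader note (illustrative only): the pointwise kernel above applies the second
# half of GroupNorm -- normalize with the per-group statistics computed by the
# reduction kernel, then scale and shift with the per-channel weight and bias --
# and fuses the ReLU into the same pass. Ignoring the fusion, the eager
# equivalent is roughly the sketch below (hypothetical helper, eps taken from
# the kernel constant 1e-06):
import torch
import torch.nn.functional as F


def _reference_group_norm_relu(x, weight, bias, num_groups=32, eps=1e-06):
    """Eager equivalent of the fused normalize + affine + ReLU above."""
    return F.relu(F.group_norm(x, num_groups, weight, bias, eps))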
# kernel path: runs/run_shard_7/inductor_cache/en/cenibl6xufmpsxvh6eyb2umnz2kxwu7iax4uwtef7x45ck3jiyqm.py
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# input_4 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=3] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = (xindex // 256) % 15
x2 = (xindex // 3840) % 15
x3 = (xindex // 57600)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp5 = tl.load(in_ptr0 + (8192 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp7 = tl.load(in_ptr0 + (8448 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp9 = tl.load(in_ptr0 + (8704 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp11 = tl.load(in_ptr0 + (16384 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp13 = tl.load(in_ptr0 + (16640 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp15 = tl.load(in_ptr0 + (16896 + x0 + (512*x1) + (16384*x2) + (262144*x3)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x4), tmp16, xmask)
tl.store(out_ptr1 + (x4), tmp41, xmask)
''', device_str='cuda')
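# Reader note (illustrative only): the kernel above is the low-memory variant of
# max_pool2d_with_indices -- a 3x3 window with stride 2 and no padding over the
# channels-last stem output (32x32 -> 15x15) -- where the second output records,
# as int8, which of the nine taps in the window produced the maximum instead of
# a flat element index. The plain eager call it corresponds to is sketched below
# (hypothetical helper name):
import torch
import torch.nn.functional as F


def _reference_stem_pool(x: torch.Tensor):
    """3x3 / stride-2 max pooling with indices, as used after the stem conv."""
    return F.max_pool2d(x, kernel_size=3, stride=2, return_indices=True)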
# kernel path: runs/run_shard_7/inductor_cache/7e/c7eyyakbx3wwdxkitvcdctegijacmtfs3f5mwqlotacqy2za6isi.py
# Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_1 => add_3
# sqrt_1 => sqrt_1
# sub_1 => sub_2
# var_mean_1 => var_mean_2
# w_1 => div_1
# Graph fragment:
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %getitem_7), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %sqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_3,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %sqrt_1), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_9 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1024, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1024
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/nh/cnhs4vcr657vluxkvtqufylpkhx3e46cwffrst5vsuhqb5bee2hm.py
# Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# residual_1 => add_4, rsqrt_1, var_mean_3
# Graph fragment:
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_per_fused_native_group_norm_10 = async_compile.triton('triton_per_fused_native_group_norm_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4096
rnumel = 225
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 1024
x1 = (xindex // 1024)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (1024*r2) + (230400*x1)), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 225, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 225.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + (x3), tmp21, None)
tl.store(out_ptr0 + (x3), tmp10, None)
tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jp/cjpyntupcqudw3ggo5knfj7ch7ghxyjxqolntzzvjzgk5am7l5vv.py
# Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_2 => add_6
# sqrt_2 => sqrt_2
# sub_2 => sub_4
# var_mean_2 => var_mean_4
# w_2 => div_2
# Graph fragment:
# %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_8, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_8, %getitem_11), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_10, 1e-05), kwargs = {})
# %sqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_6,), kwargs = {})
# %div_2 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_4, %sqrt_2), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_11 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 256
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (256*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (256*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hj/chj6ouuwknkysfru4ls3ofhe7alveqoisdmvuaoevumnq4wt2k6w.py
# Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_2 => add_7, rsqrt_2, var_mean_5
# Graph fragment:
# %var_mean_5 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_12, 1e-06), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_7,), kwargs = {})
triton_red_fused_native_group_norm_12 = async_compile.triton('triton_red_fused_native_group_norm_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 1800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = (rindex // 8)
tmp0 = tl.load(in_ptr0 + (r2 + (8*x0) + (256*r3) + (57600*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 1800.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oj/cojzk2u64jku6zsbuoyuudooypcp7v4wisckmjekrx7w47ide3nn.py
# Topologically Sorted Source Nodes: [group_norm_2, y], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_2 => add_8, mul_5
# y => relu_1
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, %unsqueeze_17), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %unsqueeze_14), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_8,), kwargs = {})
triton_poi_fused_native_group_norm_relu_13 = async_compile.triton('triton_poi_fused_native_group_norm_relu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 256
x2 = (xindex // 57600)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 8)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 8)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1800.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/au/cauwci26izb7zaj7j2kns6emmf3imcuzuc7mya2u5lekb2a4uxvi.py
# Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_3 => add_9
# sqrt_3 => sqrt_3
# sub_3 => sub_6
# var_mean_3 => var_mean_6
# w_3 => div_3
# Graph fragment:
# %var_mean_6 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_11, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_11, %getitem_15), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_14, 1e-05), kwargs = {})
# %sqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_9,), kwargs = {})
# %div_3 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_6, %sqrt_3), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_14 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[256, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 256
rnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2304.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2304*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2304*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
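# Reader note (illustrative only): unlike the persistent-reduction standardization
# kernels above, this weight reduces over 256*3*3 = 2,304 elements per output
# channel, more than one RBLOCK, so the kernel makes two passes over the data: a
# Welford loop that accumulates mean and M2 in chunks, then a second loop that
# re-reads the weight and writes (w - mean) / sqrt(var + eps). A chunked two-pass
# sketch of the same idea for a single flattened filter follows (hypothetical
# helper, not used by the compiled graph):
import torch


def _reference_chunked_standardize(row: torch.Tensor, eps: float = 1e-05, chunk: int = 512) -> torch.Tensor:
    """Standardize one flattened filter using chunked mean/M2 accumulation."""
    count, mean, m2 = 0, 0.0, 0.0
    for piece in row.split(chunk):  # first pass: Welford-style combine of chunk stats
        n = piece.numel()
        delta = piece.mean() - mean
        m2 = m2 + piece.var(unbiased=False) * n + delta * delta * count * n / (count + n)
        mean = mean + delta * n / (count + n)
        count += n
    std = torch.sqrt(m2 / count + eps)
    return (row - mean) / std  # second pass: normalize with the accumulated stats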
# kernel path: runs/run_shard_7/inductor_cache/j6/cj6u2qoik6ggh7klfu5csrib7fmrd5bj3x7ohmjq4r3slsaz6opp.py
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# y_2 => add_13, rsqrt_4, var_mean_9
# Graph fragment:
# %var_mean_9 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_20, 1e-06), kwargs = {})
# %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_13,), kwargs = {})
triton_red_fused_native_group_norm_15 = async_compile.triton('triton_red_fused_native_group_norm_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (230400*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 7200.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/n3/cn3wqwtcg63qqjfryzmmh4maj6kopa4zq2o4fttlkcmvufb32tlf.py
# Topologically Sorted Source Nodes: [residual_1, y_2, add_5, y_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_5 => add_15
# residual_1 => add_5, mul_3
# y_2 => add_14, mul_9
# y_3 => relu_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, %unsqueeze_29), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_9, %unsqueeze_26), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_14), kwargs = {})
# %relu_3 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_15,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_16 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr):
xnumel = 921600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 230400)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (1024*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (1024*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x3), None)
tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 225.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 7200.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + (x3), tmp30, None)
''', device_str='cuda')
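# Reader note (illustrative only): this kernel fuses the tail of the first
# bottleneck block -- GroupNorm on the projected shortcut, GroupNorm on the block
# output, the residual add, and the ReLU -- into a single pointwise pass, reusing
# the statistics produced by the reduction kernels above (note the two different
# eps constants, 1e-05 and 1e-06). In eager terms the fusion is roughly the
# sketch below; the group counts are left as parameters since they are folded
# into the reshaped views in the generated graph:
import torch
import torch.nn.functional as F


def _reference_residual_join(shortcut, y, w_s, b_s, w_y, b_y, groups_s, groups_y):
    """relu(GN(shortcut) + GN(y)); eps values follow the kernel constants."""
    a = F.group_norm(shortcut, groups_s, w_s, b_s, eps=1e-05)
    b = F.group_norm(y, groups_y, w_y, b_y, eps=1e-06)
    return F.relu(a + b)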
# kernel path: runs/run_shard_7/inductor_cache/za/cza7xohy5huey54ued5pdquhp4jsmdldpvdhl7u6h6evhljn6ntg.py
# Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_6 => add_16
# sqrt_5 => sqrt_5
# sub_5 => sub_10
# var_mean_5 => var_mean_10
# w_5 => div_5
# Graph fragment:
# %var_mean_10 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_17, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_17, %getitem_23), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_22, 1e-05), kwargs = {})
# %sqrt_5 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_16,), kwargs = {})
# %div_5 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_10, %sqrt_5), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_17 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 256
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qv/cqv2al4zrzdetvahtrxpbboyj4cdfrtikullucdr7ninla2ojfi3.py
# Topologically Sorted Source Nodes: [y_6, add_9, y_7], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_9 => add_25
# y_6 => add_24, mul_15
# y_7 => relu_6
# Graph fragment:
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, %unsqueeze_47), kwargs = {})
# %add_24 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_15, %unsqueeze_44), kwargs = {})
# %add_25 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_3, %add_24), kwargs = {})
# %relu_6 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_25,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_18 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 921600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 230400)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 7200.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (x3), tmp17, None)
''', device_str='cuda')
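# Reader note (illustrative only): for blocks without a projection shortcut the
# generated code fuses only the second GroupNorm, the identity residual add, and
# the ReLU, i.e. roughly relu(x + GN(y)) in eager terms (hypothetical helper):
import torch
import torch.nn.functional as F


def _reference_identity_residual(x, y, weight, bias, num_groups=32, eps=1e-06):
    """Identity-shortcut variant of the fused residual join above."""
    return F.relu(x + F.group_norm(y, num_groups, weight, bias, eps))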
# kernel path: runs/run_shard_7/inductor_cache/3e/c3e3jkfbcq52r6g6b6cbrryxsycoi3yklyx24onnuazrjrv64isp.py
# Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_18 => add_46
# sqrt_14 => sqrt_14
# sub_14 => sub_28
# var_mean_14 => var_mean_28
# w_14 => div_14
# Graph fragment:
# %var_mean_28 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_44, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_44, %getitem_59), kwargs = {})
# %add_46 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_58, 1e-05), kwargs = {})
# %sqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_46,), kwargs = {})
# %div_14 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_28, %sqrt_14), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_19 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[2048, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 2048
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gq/cgqqbj6ixr6xvohoqin2rmwkifwz3m65ciqtowyt5fhop4xj6y7o.py
# Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# residual_3 => add_47, rsqrt_14, var_mean_29
# Graph fragment:
# %var_mean_29 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_28, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_47 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_60, 1e-05), kwargs = {})
# %rsqrt_14 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_47,), kwargs = {})
triton_per_fused_native_group_norm_20 = async_compile.triton('triton_per_fused_native_group_norm_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[8192, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 8192
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 2048
x1 = (xindex // 2048)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (2048*r2) + (131072*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 64.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x3), tmp18, None)
tl.store(out_ptr0 + (x3), tmp8, None)
tl.store(out_ptr1 + (x3), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jz/cjzjlifmtzexhth3v5uhemdz6lruuvuube75oxni47hhafmbjogo.py
# Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_19 => add_49
# sqrt_15 => sqrt_15
# sub_15 => sub_30
# var_mean_15 => var_mean_30
# w_15 => div_15
# Graph fragment:
# %var_mean_30 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_47, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_47, %getitem_63), kwargs = {})
# %add_49 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_62, 1e-05), kwargs = {})
# %sqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_49,), kwargs = {})
# %div_15 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_30, %sqrt_15), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_21 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
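# NOTE (annotation, not generated code): the kernel above standardizes a conv
# weight per output filter: subtract the filter mean and divide by
# sqrt(var + 1e-05), storing both the std and the standardized weight. An
# eager-mode sketch under the assumption that the weight is viewed as
# (out_channels, -1); the function name is illustrative:
def _standardize_weight_reference(w, eps=1e-05):
    import torch
    flat = w.reshape(w.shape[0], -1)                 # one row per output filter
    var, mean = torch.var_mean(flat, dim=1, keepdim=True, correction=0)
    std = torch.sqrt(var + eps)
    return (flat - mean) / std, std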
# kernel path: runs/run_shard_7/inductor_cache/wr/cwrn5excbg56ob6bzzhwtavbqfqb7pvzdegt7kp4lbgagsgrr2bd.py
# Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_15 => add_50, rsqrt_15, var_mean_31
# Graph fragment:
# %var_mean_31 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_30, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_50 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_64, 1e-06), kwargs = {})
# %rsqrt_15 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_50,), kwargs = {})
triton_red_fused_native_group_norm_22 = async_compile.triton('triton_red_fused_native_group_norm_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 3600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (115200*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 3600.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
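# NOTE (annotation, not generated code): the reduction above accumulates
# per-chunk mean / M2 / weight with Welford's algorithm so the variance is
# obtained in one streaming pass over the rnumel elements. A plain-Python sketch
# of the scalar Welford update (illustrative only):
def _welford_update(mean, m2, count, new_value):
    count += 1
    delta = new_value - mean
    mean += delta / count
    m2 += delta * (new_value - mean)  # delta taken w.r.t. the updated mean
    return mean, m2, count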
# kernel path: runs/run_shard_7/inductor_cache/d4/cd427eloy2yjw2svol6vip5ycllxrhwgof6m2c6aujoopvoctvdd.py
# Topologically Sorted Source Nodes: [group_norm_15, y_16], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_15 => add_51, mul_31
# y_16 => relu_13
# Graph fragment:
# %mul_31 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_31, %unsqueeze_95), kwargs = {})
# %add_51 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_31, %unsqueeze_92), kwargs = {})
# %relu_13 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_51,), kwargs = {})
triton_poi_fused_native_group_norm_relu_23 = async_compile.triton('triton_poi_fused_native_group_norm_relu_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = (xindex // 115200)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 3600.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
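# NOTE (annotation, not generated code): the pointwise kernel above applies the
# group-norm affine transform ((x - mean) * rsqrt(var + eps) * weight + bias)
# followed by ReLU, with 32 groups and eps = 1e-06 hard-coded. An eager-mode
# sketch built on the public functional API, assuming the standard grouped
# layout (names are illustrative):
def _group_norm_relu_reference(x, weight, bias, num_groups=32, eps=1e-06):
    import torch.nn.functional as F
    return F.relu(F.group_norm(x, num_groups, weight=weight, bias=bias, eps=eps))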
# kernel path: runs/run_shard_7/inductor_cache/ml/cmlxchcvym4okka7o4w3dtz75nwcpens7klv7see6gzrgjru4crn.py
# Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_20 => add_52
# sqrt_16 => sqrt_16
# sub_16 => sub_32
# var_mean_16 => var_mean_32
# w_16 => div_16
# Graph fragment:
# %var_mean_32 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_50, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_50, %getitem_67), kwargs = {})
# %add_52 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_66, 1e-05), kwargs = {})
# %sqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_52,), kwargs = {})
# %div_16 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_32, %sqrt_16), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_24 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[512, 8192],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_24', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 512
rnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 4608.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (4608*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (4608*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
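# NOTE (annotation, not generated code): because rnumel = 4608 does not fit in a
# single persistent block, the kernel above makes two passes over the reduction
# dimension: the first loop computes the per-filter mean and std via the Welford
# helpers, the second re-reads the weight and writes the standardized values.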
# kernel path: runs/run_shard_7/inductor_cache/ql/cqlfabfzu7z7frymncc5rvxsuisyioetxpzwyizodqojh3qbf3ww.py
# Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_16 => add_53, rsqrt_16, var_mean_33
# Graph fragment:
# %var_mean_33 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_32, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_53 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_68, 1e-06), kwargs = {})
# %rsqrt_16 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_53,), kwargs = {})
triton_per_fused_native_group_norm_25 = async_compile.triton('triton_per_fused_native_group_norm_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_25', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = (rindex // 16)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (16*x0) + (512*r3) + (32768*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/je/cje4qmnqbitm4amlfxfa3mdxapvvr5hptd7oqfvjtooh5sfigskl.py
# Topologically Sorted Source Nodes: [group_norm_16, y_17], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_16 => add_54, mul_33
# y_17 => relu_14
# Graph fragment:
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_33, %unsqueeze_101), kwargs = {})
# %add_54 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_33, %unsqueeze_98), kwargs = {})
# %relu_14 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_54,), kwargs = {})
triton_poi_fused_native_group_norm_relu_26 = async_compile.triton('triton_poi_fused_native_group_norm_relu_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = (xindex // 32768)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 16)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pa/cpapwllspqbtgblftrbrapxpxfewknnyrkauwrnhp7qhdxyjnjyq.py
# Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_21 => add_55
# sqrt_17 => sqrt_17
# sub_17 => sub_34
# var_mean_17 => var_mean_34
# w_17 => div_17
# Graph fragment:
# %var_mean_34 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_53, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_53, %getitem_71), kwargs = {})
# %add_55 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_70, 1e-05), kwargs = {})
# %sqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_55,), kwargs = {})
# %div_17 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_34, %sqrt_17), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_27 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[2048, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_27', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 2048
XBLOCK: tl.constexpr = 1
rnumel = 512
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (512*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (512*x0)), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/mn/cmnjsvtayefkonejpy7zk4q2mcsbvzyofwxkaeldnf5t7ebemkif.py
# Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# y_18 => add_56, rsqrt_17, var_mean_35
# Graph fragment:
# %var_mean_35 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_34, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_56 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_72, 1e-06), kwargs = {})
# %rsqrt_17 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_56,), kwargs = {})
triton_red_fused_native_group_norm_28 = async_compile.triton('triton_red_fused_native_group_norm_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r2 + (64*x0) + (2048*r3) + (131072*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ji/cjijxahsz2tpalc64o6xljydbultwefrwydw7xdce475v6s6ttfb.py
# Topologically Sorted Source Nodes: [residual_3, y_18, add_22, y_19], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_22 => add_58
# residual_3 => add_48, mul_29
# y_18 => add_57, mul_35
# y_19 => relu_15
# Graph fragment:
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_29, %unsqueeze_89), kwargs = {})
# %add_48 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_29, %unsqueeze_86), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_35, %unsqueeze_107), kwargs = {})
# %add_57 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_35, %unsqueeze_104), kwargs = {})
# %add_58 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_48, %add_57), kwargs = {})
# %relu_15 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_58,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_29 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_29', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 131072)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (2048*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (2048*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x3), None)
tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 4096.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + (x3), tmp30, None)
''', device_str='cuda')
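# NOTE (annotation, not generated code): the epilogue above fuses two group-norm
# affine transforms with the residual add and ReLU: the projected residual is
# normalized with eps = 1e-05, the main branch with eps = 1e-06, the two results
# are summed, and the sum is clamped at zero. A tensor-level sketch that takes
# the broadcast statistics produced by the earlier reduction kernels (names are
# illustrative):
def _residual_gn_relu_reference(x_res, mean_r, rstd_r, w_r, b_r,
                                x_main, mean_m, rstd_m, w_m, b_m):
    import torch
    res = (x_res - mean_r) * rstd_r * w_r + b_r
    main = (x_main - mean_m) * rstd_m * w_m + b_m
    return torch.relu(res + main)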
# kernel path: runs/run_shard_7/inductor_cache/e5/ce5e725kxvl6jl7eeekitaoe56q5reesvisbzhqldhffeu2qrkfc.py
# Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_23 => add_59
# sqrt_18 => sqrt_18
# sub_18 => sub_36
# var_mean_18 => var_mean_36
# w_18 => div_18
# Graph fragment:
# %var_mean_36 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_56, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_56, %getitem_75), kwargs = {})
# %add_59 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_74, 1e-05), kwargs = {})
# %sqrt_18 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_59,), kwargs = {})
# %div_18 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_36, %sqrt_18), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_30 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[512, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_30', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k3/ck3bygbjqyc7l2hv4slwiesyb5h7v6uvamlqu24x7fojtgjyysbk.py
# Topologically Sorted Source Nodes: [y_22, add_26, y_23], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_26 => add_68
# y_22 => add_67, mul_41
# y_23 => relu_18
# Graph fragment:
# %mul_41 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_41, %unsqueeze_125), kwargs = {})
# %add_67 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_41, %unsqueeze_122), kwargs = {})
# %add_68 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_15, %add_67), kwargs = {})
# %relu_18 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_68,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_31 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = (xindex // 131072)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 64)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 4096.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (x3), tmp17, None)
''', device_str='cuda')
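# NOTE (annotation, not generated code): same fusion pattern as the residual
# epilogue above, except that the skip input (relu_15) is added directly,
# without its own normalization, before the final ReLU.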
# kernel path: runs/run_shard_7/inductor_cache/w7/cw7buxayjszrffyrdccjcbcic36lffdaj7qocb2wpcxz4k46hhci.py
# Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_35 => add_89
# sqrt_27 => sqrt_27
# sub_27 => sub_54
# var_mean_27 => var_mean_54
# w_27 => div_27
# Graph fragment:
# %var_mean_54 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_83, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_54 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_83, %getitem_111), kwargs = {})
# %add_89 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_110, 1e-05), kwargs = {})
# %sqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_89,), kwargs = {})
# %div_27 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_54, %sqrt_27), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_32 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[4096, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_32', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 4096
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/a5/ca5qmfavufo2qfs3p4ctwkbe7k366d267lmsw7ioa4nlu6dbqihs.py
# Topologically Sorted Source Nodes: [residual_5], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# residual_5 => add_90, rsqrt_27, var_mean_55
# Graph fragment:
# %var_mean_55 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_54, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_90 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_112, 1e-05), kwargs = {})
# %rsqrt_27 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_90,), kwargs = {})
triton_per_fused_native_group_norm_33 = async_compile.triton('triton_per_fused_native_group_norm_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16384, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_33', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16384
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = (xindex // 4096)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4096*r2) + (65536*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x3), tmp18, None)
tl.store(out_ptr0 + (x3), tmp8, None)
tl.store(out_ptr1 + (x3), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/i5/ci5mqnd52qvyrtjersam7awijmxjmjmms53bnijcvka3s3m4j7xb.py
# Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_36 => add_92
# sqrt_28 => sqrt_28
# sub_28 => sub_56
# var_mean_28 => var_mean_56
# w_28 => div_28
# Graph fragment:
# %var_mean_56 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_86, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_56 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_86, %getitem_115), kwargs = {})
# %add_92 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_114, 1e-05), kwargs = {})
# %sqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_92,), kwargs = {})
# %div_28 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_56, %sqrt_28), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_34 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_34', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (2048*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (2048*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ta/ctaihcjspdz63grwe4pfyyqfgwiuwoinc6dsakq2ggu274bly3er.py
# Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_28 => add_93, rsqrt_28, var_mean_57
# Graph fragment:
# %var_mean_57 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_56, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_93 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_116, 1e-06), kwargs = {})
# %rsqrt_28 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_93,), kwargs = {})
triton_red_fused_native_group_norm_35 = async_compile.triton('triton_red_fused_native_group_norm_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = (rindex // 32)
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xi/cxiqxvbsz3ukssdeoyptshvpqvhiwaaf4kbrrdv5nfaasaybrl33.py
# Topologically Sorted Source Nodes: [group_norm_28, y_32], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_28 => add_94, mul_57
# y_32 => relu_25
# Graph fragment:
# %mul_57 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_57, %unsqueeze_173), kwargs = {})
# %add_94 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_57, %unsqueeze_170), kwargs = {})
# %relu_25 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_94,), kwargs = {})
triton_poi_fused_native_group_norm_relu_36 = async_compile.triton('triton_poi_fused_native_group_norm_relu_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
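# Editorial note: a minimal eager-mode sketch of the fused normalize -> affine -> ReLU
# above (the kernel divides the stored m2 by 2048 to recover the biased variance before
# taking rsqrt). The helper name and the use of F.group_norm are assumptions made for
# illustration; the generated kernel reads the precomputed statistics buffers directly.
def _group_norm_relu_sketch(x, weight, bias, num_groups=32, eps=1e-06):
    import torch.nn.functional as F
    return F.relu(F.group_norm(x, num_groups, weight, bias, eps))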
# kernel path: runs/run_shard_7/inductor_cache/ie/ciexv24o44oxfqehcf5g5n3uw7wm3rwzhg6j6yp6xehzai7x3ppj.py
# Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_37 => add_95
# sqrt_29 => sqrt_29
# sub_29 => sub_58
# var_mean_29 => var_mean_58
# w_29 => div_29
# Graph fragment:
# %var_mean_58 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_89, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_58 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_89, %getitem_119), kwargs = {})
# %add_95 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_118, 1e-05), kwargs = {})
# %sqrt_29 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_95,), kwargs = {})
# %div_29 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_58, %sqrt_29), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_37 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 16384],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_37', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 9216.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (9216*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (9216*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
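# Editorial note: this reduction is weight standardization for a (1024, 1024, 3, 3)
# convolution weight: each output-channel slice of 9216 values is centred and divided
# by sqrt(var + 1e-05), with the standardized weight written in a second pass. A hedged
# eager-mode sketch follows; the helper name is illustrative and unused by call().
def _standardize_weight_sketch(w, eps=1e-05):
    # w: (out_channels, in_channels, kH, kW); statistics taken per output channel.
    var, mean = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    return (w - mean) / torch.sqrt(var + eps)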
# kernel path: runs/run_shard_7/inductor_cache/yg/cygphweqnq7pnwvkahvghd3g67zml3uufl6pze324yvsgxttpecs.py
# Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# group_norm_29 => add_96, rsqrt_29, var_mean_59
# Graph fragment:
# %var_mean_59 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_58, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_96 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_120, 1e-06), kwargs = {})
# %rsqrt_29 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_96,), kwargs = {})
triton_per_fused_native_group_norm_38 = async_compile.triton('triton_per_fused_native_group_norm_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[128, 512],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_38', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 128
XBLOCK: tl.constexpr = 1
rnumel = 512
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = (rindex // 32)
x0 = xindex % 32
x1 = (xindex // 32)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (32*x0) + (1024*r3) + (16384*x1)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + (x4), tmp18, None)
tl.store(out_ptr0 + (x4), tmp8, None)
tl.store(out_ptr1 + (x4), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/l4/cl4rrnjrwmvzdp3wuhtfdwp35f7iqe3swf2ufqxhvpdgyd2ns24f.py
# Topologically Sorted Source Nodes: [group_norm_29, y_33], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm_29 => add_97, mul_59
# y_33 => relu_26
# Graph fragment:
# %mul_59 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_59, %unsqueeze_179), kwargs = {})
# %add_97 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_59, %unsqueeze_176), kwargs = {})
# %relu_26 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_97,), kwargs = {})
triton_poi_fused_native_group_norm_relu_39 = async_compile.triton('triton_poi_fused_native_group_norm_relu_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_group_norm_relu_39', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = (xindex // 16384)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + ((32*x2) + (x0 // 32)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + (x3), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5p/c5po2fxloubqkx7bxv5xz6kvdz6v4qojkssixn4dxk7vmlqdmvuc.py
# Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_38 => add_98
# sqrt_30 => sqrt_30
# sub_30 => sub_60
# var_mean_30 => var_mean_60
# w_30 => div_30
# Graph fragment:
# %var_mean_60 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_92, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_60 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_92, %getitem_123), kwargs = {})
# %add_98 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_122, 1e-05), kwargs = {})
# %sqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_98,), kwargs = {})
# %div_30 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_60, %sqrt_30), kwargs = {})
triton_per_fused_add_div_sqrt_sub_var_mean_40 = async_compile.triton('triton_per_fused_add_div_sqrt_sub_var_mean_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4096, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_sqrt_sub_var_mean_40', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 4096
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (1024*x0)), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp18, None)
tl.store(out_ptr1 + (r1 + (1024*x0)), tmp20, None)
''', device_str='cuda')
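# Editorial note: this kernel computes the same weight-standardization pattern as
# triton_red_fused_add_div_sqrt_sub_var_mean_37 above, but as a persistent reduction:
# the 1024-element per-output-channel reduction fits in a single RBLOCK, so no roffset
# loop or second read pass is needed and the standardized weight is written in one pass.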
# kernel path: runs/run_shard_7/inductor_cache/wp/cwpa5qp2ta7tnws6bdr5vxs47sehbs5clrraba3gd4l7tispuaoa.py
# Topologically Sorted Source Nodes: [y_34], Original ATen: [aten.native_group_norm]
# Source node to ATen node mapping:
# y_34 => add_99, rsqrt_30, var_mean_61
# Graph fragment:
# %var_mean_61 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_60, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_99 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_124, 1e-06), kwargs = {})
# %rsqrt_30 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_99,), kwargs = {})
triton_red_fused_native_group_norm_41 = async_compile.triton('triton_red_fused_native_group_norm_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 2048],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_group_norm_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = (xindex // 32)
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = (rindex // 128)
tmp0 = tl.load(in_ptr0 + (r2 + (128*x0) + (4096*r3) + (65536*x1)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tl.store(out_ptr0 + (x4), tmp2, xmask)
tl.store(out_ptr1 + (x4), tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + (x4), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zw/czwdegbzheopnvwdl4eqnhwujnatwuxmgcovtppktuchxyyj6edp.py
# Topologically Sorted Source Nodes: [residual_5, y_34, add_39, y_35], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_39 => add_101
# residual_5 => add_91, mul_55
# y_34 => add_100, mul_61
# y_35 => relu_27
# Graph fragment:
# %mul_55 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_55, %unsqueeze_167), kwargs = {})
# %add_91 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_55, %unsqueeze_164), kwargs = {})
# %mul_61 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_61, %unsqueeze_185), kwargs = {})
# %add_100 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_61, %unsqueeze_182), kwargs = {})
# %add_101 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_91, %add_100), kwargs = {})
# %relu_27 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_101,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_42 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_42', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_42', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (4096*x2)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + (x3), None)
tmp15 = tl.load(in_ptr6 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr7 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr8 + (x0), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 2048.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + (x3), tmp30, None)
''', device_str='cuda')
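# Editorial note: this is the join of a projection residual block: the shortcut branch
# (in_ptr0) and the main branch (in_ptr5) are each normalized with their own statistics
# and affine parameters (both mapped to aten.native_group_norm in the graph fragment)
# before the elementwise add and ReLU. A loose eager-mode sketch, with the two
# normalization modules passed in as illustrative assumptions:
def _projection_block_join_sketch(shortcut, y, norm_shortcut, norm_main):
    # norm_shortcut / norm_main: e.g. torch.nn.GroupNorm instances (assumed for illustration).
    return torch.relu(norm_shortcut(shortcut) + norm_main(y))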
# kernel path: runs/run_shard_7/inductor_cache/fc/cfccvugrjb5bchp7busa35nnjngrxenithy3hzc3iqtipvpmnaq7.py
# Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# add_40 => add_102
# sqrt_31 => sqrt_31
# sub_31 => sub_62
# var_mean_31 => var_mean_62
# w_31 => div_31
# Graph fragment:
# %var_mean_62 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_95, [1, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %sub_62 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_95, %getitem_127), kwargs = {})
# %add_102 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_126, 1e-05), kwargs = {})
# %sqrt_31 : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_102,), kwargs = {})
# %div_31 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_62, %sqrt_31), kwargs = {})
triton_red_fused_add_div_sqrt_sub_var_mean_43 = async_compile.triton('triton_red_fused_add_div_sqrt_sub_var_mean_43', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[1024, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_sqrt_sub_var_mean_43', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = triton_helpers.welford_reduce(
tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(
tmp2_mean, tmp2_m2, tmp2_weight, 1
)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4 = tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + (4096*x0)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + (4096*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/wz/cwzmw2mxocwzkaptk7ltuhxbrbaqhh3walrpi3otgnu3l4igiwew.py
# Topologically Sorted Source Nodes: [y_38, add_43, y_39], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_43 => add_111
# y_38 => add_110, mul_67
# y_39 => relu_30
# Graph fragment:
# %mul_67 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_67, %unsqueeze_203), kwargs = {})
# %add_110 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_67, %unsqueeze_200), kwargs = {})
# %add_111 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_27, %add_110), kwargs = {})
# %relu_30 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_111,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_44 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_44', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_44', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = (xindex // 65536)
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x3), None)
tmp2 = tl.load(in_ptr2 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + ((32*x2) + (x0 // 128)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x0), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x0), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (x3), tmp17, None)
''', device_str='cuda')
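# Editorial note: the identity-shortcut counterpart of the join above: the previous
# block's ReLU output (in_ptr0) is added directly to the group-normalized main branch,
# then passed through ReLU. A minimal sketch under the same assumptions:
def _identity_block_join_sketch(shortcut, y, norm_main):
    return torch.relu(shortcut + norm_main(y))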
# kernel path: runs/run_shard_7/inductor_cache/qr/cqrbpncc5yhew7raztwazh5qtqmewuygbnx7jaosdzutra7m33et.py
# Topologically Sorted Source Nodes: [y_46, add_51, y_47], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
# Source node to ATen node mapping:
# add_51 => add_131
# y_46 => add_130, mul_79
# y_47 => relu_36
# Graph fragment:
# %mul_79 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_79, %unsqueeze_239), kwargs = {})
# %add_130 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_79, %unsqueeze_236), kwargs = {})
# %add_131 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu_33, %add_130), kwargs = {})
# %relu_36 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_131,), kwargs = {})
triton_poi_fused_add_native_group_norm_relu_45 = async_compile.triton('triton_poi_fused_add_native_group_norm_relu_45', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_group_norm_relu_45', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = (yindex // 16)
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + ((32*y1) + (x2 // 128)), ymask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + ((32*y1) + (x2 // 128)), ymask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (y0 + (16*x2) + (65536*y1)), tmp17, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ui/cuic7o4vbk3ddfcahfstl2n4yrl6azuxcyoqipnublq7krxibcnu.py
# Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward]
# Source node to ATen node mapping:
# Graph fragment:
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_36, 0), kwargs = {})
triton_poi_fused_threshold_backward_46 = async_compile.triton('triton_poi_fused_threshold_backward_46', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_46', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16384
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = (yindex // 4096)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (y0 + (4096*x2) + (65536*y1)), tmp2, xmask)
''', device_str='cuda')
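# Editorial note: aten.threshold_backward here only materializes the boolean mask needed
# for the backward pass of the final ReLU, while also rewriting the result into a
# different memory layout. An eager-mode equivalent of the mask itself:
def _relu_backward_mask_sketch(relu_out):
    # Gradients will be zeroed wherever the forward ReLU output was <= 0.
    return relu_out <= 0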
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121 = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256, ), (1, ))
assert_size_stride(primals_4, (256, ), (1, ))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (1024, ), (1, ))
assert_size_stride(primals_7, (1024, ), (1, ))
assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (256, ), (1, ))
assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_12, (256, ), (1, ))
assert_size_stride(primals_13, (256, ), (1, ))
assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (1024, ), (1, ))
assert_size_stride(primals_16, (1024, ), (1, ))
assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_18, (256, ), (1, ))
assert_size_stride(primals_19, (256, ), (1, ))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256, ), (1, ))
assert_size_stride(primals_22, (256, ), (1, ))
assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_24, (1024, ), (1, ))
assert_size_stride(primals_25, (1024, ), (1, ))
assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_27, (256, ), (1, ))
assert_size_stride(primals_28, (256, ), (1, ))
assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (256, ), (1, ))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_33, (1024, ), (1, ))
assert_size_stride(primals_34, (1024, ), (1, ))
assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_36, (256, ), (1, ))
assert_size_stride(primals_37, (256, ), (1, ))
assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (256, ), (1, ))
assert_size_stride(primals_40, (256, ), (1, ))
assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_42, (1024, ), (1, ))
assert_size_stride(primals_43, (1024, ), (1, ))
assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_45, (2048, ), (1, ))
assert_size_stride(primals_46, (2048, ), (1, ))
assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_48, (512, ), (1, ))
assert_size_stride(primals_49, (512, ), (1, ))
assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_51, (512, ), (1, ))
assert_size_stride(primals_52, (512, ), (1, ))
assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_54, (2048, ), (1, ))
assert_size_stride(primals_55, (2048, ), (1, ))
assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_57, (512, ), (1, ))
assert_size_stride(primals_58, (512, ), (1, ))
assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_60, (512, ), (1, ))
assert_size_stride(primals_61, (512, ), (1, ))
assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_63, (2048, ), (1, ))
assert_size_stride(primals_64, (2048, ), (1, ))
assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_66, (512, ), (1, ))
assert_size_stride(primals_67, (512, ), (1, ))
assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_69, (512, ), (1, ))
assert_size_stride(primals_70, (512, ), (1, ))
assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_72, (2048, ), (1, ))
assert_size_stride(primals_73, (2048, ), (1, ))
assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_75, (512, ), (1, ))
assert_size_stride(primals_76, (512, ), (1, ))
assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_78, (512, ), (1, ))
assert_size_stride(primals_79, (512, ), (1, ))
assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_81, (2048, ), (1, ))
assert_size_stride(primals_82, (2048, ), (1, ))
assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_84, (4096, ), (1, ))
assert_size_stride(primals_85, (4096, ), (1, ))
assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_87, (1024, ), (1, ))
assert_size_stride(primals_88, (1024, ), (1, ))
assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_90, (1024, ), (1, ))
assert_size_stride(primals_91, (1024, ), (1, ))
assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_93, (4096, ), (1, ))
assert_size_stride(primals_94, (4096, ), (1, ))
assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_96, (1024, ), (1, ))
assert_size_stride(primals_97, (1024, ), (1, ))
assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_99, (1024, ), (1, ))
assert_size_stride(primals_100, (1024, ), (1, ))
assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_102, (4096, ), (1, ))
assert_size_stride(primals_103, (4096, ), (1, ))
assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_105, (1024, ), (1, ))
assert_size_stride(primals_106, (1024, ), (1, ))
assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_108, (1024, ), (1, ))
assert_size_stride(primals_109, (1024, ), (1, ))
assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_111, (4096, ), (1, ))
assert_size_stride(primals_112, (4096, ), (1, ))
assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_114, (1024, ), (1, ))
assert_size_stride(primals_115, (1024, ), (1, ))
assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_117, (1024, ), (1, ))
assert_size_stride(primals_118, (1024, ), (1, ))
assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_120, (4096, ), (1, ))
assert_size_stride(primals_121, (4096, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 768, 49, grid=grid(768, 49), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_2, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_11, buf2, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_11
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_20, buf3, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_20
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_29, buf4, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_29
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_38, buf5, 65536, 9, grid=grid(65536, 9), stream=stream0)
del primals_38
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_50, buf6, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_50
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_59, buf7, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_59
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_68, buf8, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_68
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_77, buf9, 262144, 9, grid=grid(262144, 9), stream=stream0)
del primals_77
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_89, buf10, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_89
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_98, buf11, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_98
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_107, buf12, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_107
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_116, buf13, 1048576, 9, grid=grid(1048576, 9), stream=stream0)
del primals_116
buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf15 # reuse
buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.float32)
# Topologically Sorted Source Nodes: [var_mean, sub, add, sqrt, w], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_5.run(buf17, buf0, buf18, 256, 147, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [input_2], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_6.run(buf19, buf20, buf21, buf23, 128, 8192, grid=grid(128), stream=stream0)
buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256), torch.float32)
# Topologically Sorted Source Nodes: [input_2, input_3], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_7.run(buf19, buf20, buf21, primals_3, primals_4, buf24, 1048576, grid=grid(1048576), stream=stream0)
del primals_4
buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.int8)
# Topologically Sorted Source Nodes: [input_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_8.run(buf24, buf25, buf26, 230400, grid=grid(230400), stream=stream0)
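        # Editorial note: buf19..buf26 form the network stem: the channels-last input
        # (buf1) goes through a weight-standardized 7x7 / stride-2 convolution,
        # GroupNorm + ReLU, and a stride-2 max pool that reduces the 32x32 feature map
        # to 15x15 before the residual stages begin.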
buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf28 # reuse
buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_1, sub_1, add_1, sqrt_1, w_1], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf30, primals_5, buf31, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [residual], Original ATen: [aten.convolution]
buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32)
buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32)
buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [residual_1], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_10.run(buf32, buf33, buf34, buf36, 4096, 225, grid=grid(4096), stream=stream0)
buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf38 # reuse
buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_2, sub_2, add_2, sqrt_2, w_2], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_11.run(buf40, primals_8, buf41, 256, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf43 = buf21; del buf21 # reuse
buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_2], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf42, buf43, buf44, buf46, 128, 1800, grid=grid(128), stream=stream0)
buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_2, y], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf42, buf43, buf44, primals_9, primals_10, buf47, 230400, grid=grid(230400), stream=stream0)
del primals_10
buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf49 # reuse
buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_3, sub_3, add_3, sqrt_3, w_3], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf51, buf2, buf52, 256, 2304, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf54 = buf44; del buf44 # reuse
buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_3], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf53, buf54, buf55, buf57, 128, 1800, grid=grid(128), stream=stream0)
buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_3, y_1], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf53, buf54, buf55, primals_12, primals_13, buf58, 230400, grid=grid(230400), stream=stream0)
del primals_13
buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf60 # reuse
buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_4, sub_4, add_4, sqrt_4, w_4], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf62, primals_14, buf63, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf65 = buf55; del buf55 # reuse
buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf64, buf65, buf66, buf68, 128, 7200, grid=grid(128), stream=stream0)
buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32)
buf70 = buf69; del buf69 # reuse
# Topologically Sorted Source Nodes: [residual_1, y_2, add_5, y_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_16.run(buf70, buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66, primals_15, primals_16, 921600, grid=grid(921600), stream=stream0)
del primals_16
del primals_7
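    # This closes the first unit of the stage: per the source-node comment (residual_1, y_2,
    # add_5, y_3), the fused kernel normalizes the shortcut (buf32, stats buf33/buf34, affine
    # primals_6/primals_7) and the main path (buf64, stats buf65/buf66, affine
    # primals_15/primals_16), adds them, and applies ReLU in place on buf70 — i.e. roughly
    # y = relu(norm(shortcut) + norm(main)).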
buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf72 # reuse
buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_5, sub_5, add_6, sqrt_5, w_5], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf74, primals_17, buf75, 256, 1024, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf77 = buf66; del buf66 # reuse
buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_5], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf76, buf77, buf78, buf80, 128, 1800, grid=grid(128), stream=stream0)
buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_5, y_4], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf76, buf77, buf78, primals_18, primals_19, buf81, 230400, grid=grid(230400), stream=stream0)
del primals_19
buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf83 # reuse
buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_6, sub_6, add_7, sqrt_6, w_6], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf85, buf3, buf86, 256, 2304, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf88 = buf78; del buf78 # reuse
buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_6], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf87, buf88, buf89, buf91, 128, 1800, grid=grid(128), stream=stream0)
buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_6, y_5], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf87, buf88, buf89, primals_21, primals_22, buf92, 230400, grid=grid(230400), stream=stream0)
del primals_22
buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf94 # reuse
buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_7, sub_7, add_8, sqrt_7, w_7], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf96, primals_23, buf97, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf99 = buf89; del buf89 # reuse
buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_6], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf98, buf99, buf100, buf102, 128, 7200, grid=grid(128), stream=stream0)
buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32)
# Topologically Sorted Source Nodes: [y_6, add_9, y_7], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_18.run(buf70, buf98, buf99, buf100, primals_24, primals_25, buf103, 921600, grid=grid(921600), stream=stream0)
del primals_25
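    # Later units in this stage use an identity shortcut: the _18 variant of the fused kernel
    # adds the previous block output (buf70) directly to the group-normalized main path and
    # applies ReLU, i.e. roughly y = relu(identity + norm(main)); no projection convolution
    # or second set of normalization statistics is needed.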
buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf105 # reuse
buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_8, sub_8, add_10, sqrt_8, w_8], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf107, primals_26, buf108, 256, 1024, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf110 = buf100; del buf100 # reuse
buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_8], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf109, buf110, buf111, buf113, 128, 1800, grid=grid(128), stream=stream0)
buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_8, y_8], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf109, buf110, buf111, primals_27, primals_28, buf114, 230400, grid=grid(230400), stream=stream0)
del primals_28
buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf116 # reuse
buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_9, sub_9, add_11, sqrt_9, w_9], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf118, buf4, buf119, 256, 2304, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf121 = buf111; del buf111 # reuse
buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_9], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf120, buf121, buf122, buf124, 128, 1800, grid=grid(128), stream=stream0)
buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_9, y_9], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf120, buf121, buf122, primals_30, primals_31, buf125, 230400, grid=grid(230400), stream=stream0)
del primals_31
buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf127 # reuse
buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_10, sub_10, add_12, sqrt_10, w_10], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf129, primals_32, buf130, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf132 = buf122; del buf122 # reuse
buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_10], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf131, buf132, buf133, buf135, 128, 7200, grid=grid(128), stream=stream0)
buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32)
# Topologically Sorted Source Nodes: [y_10, add_13, y_11], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_18.run(buf103, buf131, buf132, buf133, primals_33, primals_34, buf136, 921600, grid=grid(921600), stream=stream0)
del primals_34
buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf138 # reuse
buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_11, sub_11, add_14, sqrt_11, w_11], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_17.run(buf140, primals_35, buf141, 256, 1024, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf143 = buf133; del buf133 # reuse
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_11], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf142, buf143, buf144, buf146, 128, 1800, grid=grid(128), stream=stream0)
buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_11, y_12], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf142, buf143, buf144, primals_36, primals_37, buf147, 230400, grid=grid(230400), stream=stream0)
del primals_37
buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256), torch.float32)
buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0); del buf149 # reuse
buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_12, sub_12, add_15, sqrt_12, w_12], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_14.run(buf151, buf5, buf152, 256, 2304, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf154 = buf144; del buf144 # reuse
buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf157 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_12], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_12.run(buf153, buf154, buf155, buf157, 128, 1800, grid=grid(128), stream=stream0)
buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_12, y_13], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_13.run(buf153, buf154, buf155, primals_39, primals_40, buf158, 230400, grid=grid(230400), stream=stream0)
del primals_40
buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf160 # reuse
buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_13, sub_13, add_16, sqrt_13, w_13], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_9.run(buf162, primals_41, buf163, 1024, 256, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf165 = buf155; del buf155 # reuse
buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_14], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_15.run(buf164, buf165, buf166, buf168, 128, 7200, grid=grid(128), stream=stream0)
buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360, 1024), torch.float32)
# Topologically Sorted Source Nodes: [y_14, add_17, y_15], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_18.run(buf136, buf164, buf165, buf166, primals_42, primals_43, buf169, 921600, grid=grid(921600), stream=stream0)
del primals_43
buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf171 # reuse
buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_14, sub_14, add_18, sqrt_14, w_14], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_19.run(buf173, primals_44, buf174, 2048, 1024, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [residual_2], Original ATen: [aten.convolution]
buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32)
buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32)
buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192), torch.float32)
# Topologically Sorted Source Nodes: [residual_3], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_20.run(buf175, buf176, buf177, buf179, 8192, 64, grid=grid(8192), stream=stream0)
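    # Stage transition: the stride-2, 1x1 projection above (residual_2) takes the
    # 1024-channel 15x15 features to 2048 channels at 8x8 (buf175), and buf176/buf177/buf179
    # hold the per-sample statistics for its normalization (residual_3), mirroring the
    # residual_1 pattern of the previous stage.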
buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf181 # reuse
buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_15, sub_15, add_19, sqrt_15, w_15], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_21.run(buf183, primals_47, buf184, 512, 1024, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512))
buf186 = buf166; del buf166 # reuse
buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_15], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_22.run(buf185, buf186, buf187, buf189, 128, 3600, grid=grid(128), stream=stream0)
buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_15, y_16], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_23.run(buf185, buf186, buf187, primals_48, primals_49, buf190, 460800, grid=grid(460800), stream=stream0)
del primals_49
buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf192 # reuse
buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_16, sub_16, add_20, sqrt_16, w_16], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf194, buf6, buf195, 512, 4608, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf197 = buf187; del buf187 # reuse
buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_16], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf196, buf197, buf198, buf200, 128, 1024, grid=grid(128), stream=stream0)
buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_16, y_17], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf196, buf197, buf198, primals_51, primals_52, buf201, 131072, grid=grid(131072), stream=stream0)
del primals_52
buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf203 # reuse
buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_17, sub_17, add_21, sqrt_17, w_17], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf205, primals_53, buf206, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf208 = buf198; del buf198 # reuse
buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_18], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf207, buf208, buf209, buf211, 128, 4096, grid=grid(128), stream=stream0)
buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
buf213 = buf212; del buf212 # reuse
# Topologically Sorted Source Nodes: [residual_3, y_18, add_22, y_19], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_29.run(buf213, buf175, buf176, buf177, primals_45, primals_46, buf207, buf208, buf209, primals_54, primals_55, 524288, grid=grid(524288), stream=stream0)
del buf177
del primals_46
del primals_55
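    # The recurring "del bufNN  # reuse" / "del primals_NN" pattern is inductor's memory
    # planning at work: the small (4, 32, 1, 1) statistics buffers are recycled across
    # blocks rather than reallocated, and parameters are released as soon as their last
    # consumer has run.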
buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf215 # reuse
buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_18, sub_18, add_23, sqrt_18, w_18], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf217, primals_56, buf218, 512, 2048, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf220 = buf209; del buf209 # reuse
buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_18], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf219, buf220, buf221, buf223, 128, 1024, grid=grid(128), stream=stream0)
buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_18, y_20], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf219, buf220, buf221, primals_57, primals_58, buf224, 131072, grid=grid(131072), stream=stream0)
del primals_58
buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf226 # reuse
buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_19, sub_19, add_24, sqrt_19, w_19], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf228, buf7, buf229, 512, 4608, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf231 = buf221; del buf221 # reuse
buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_19], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf230, buf231, buf232, buf234, 128, 1024, grid=grid(128), stream=stream0)
buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_19, y_21], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf230, buf231, buf232, primals_60, primals_61, buf235, 131072, grid=grid(131072), stream=stream0)
del primals_61
buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf237 # reuse
buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_20, sub_20, add_25, sqrt_20, w_20], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf239, primals_62, buf240, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf242 = buf232; del buf232 # reuse
buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_22], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf241, buf242, buf243, buf245, 128, 4096, grid=grid(128), stream=stream0)
buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [y_22, add_26, y_23], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_31.run(buf213, buf241, buf242, buf243, primals_63, primals_64, buf246, 524288, grid=grid(524288), stream=stream0)
del primals_64
buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf248 # reuse
buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_21, sub_21, add_27, sqrt_21, w_21], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf250, primals_65, buf251, 512, 2048, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf253 = buf243; del buf243 # reuse
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_21], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf252, buf253, buf254, buf256, 128, 1024, grid=grid(128), stream=stream0)
buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_21, y_24], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf252, buf253, buf254, primals_66, primals_67, buf257, 131072, grid=grid(131072), stream=stream0)
del primals_67
buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf259 # reuse
buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_22, sub_22, add_28, sqrt_22, w_22], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf261, buf8, buf262, 512, 4608, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf264 = buf254; del buf254 # reuse
buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_22], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf263, buf264, buf265, buf267, 128, 1024, grid=grid(128), stream=stream0)
buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_22, y_25], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf263, buf264, buf265, primals_69, primals_70, buf268, 131072, grid=grid(131072), stream=stream0)
del primals_70
buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf270 # reuse
buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_23, sub_23, add_29, sqrt_23, w_23], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf272, primals_71, buf273, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_23], Original ATen: [aten.convolution]
buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf275 = buf265; del buf265 # reuse
buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_26], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf274, buf275, buf276, buf278, 128, 4096, grid=grid(128), stream=stream0)
buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [y_26, add_30, y_27], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_31.run(buf246, buf274, buf275, buf276, primals_72, primals_73, buf279, 524288, grid=grid(524288), stream=stream0)
del primals_73
buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf281 # reuse
buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_24, sub_24, add_31, sqrt_24, w_24], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_30.run(buf283, primals_74, buf284, 512, 2048, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_24], Original ATen: [aten.convolution]
buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf286 = buf276; del buf276 # reuse
buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_24], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf285, buf286, buf287, buf289, 128, 1024, grid=grid(128), stream=stream0)
buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_24, y_28], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf285, buf286, buf287, primals_75, primals_76, buf290, 131072, grid=grid(131072), stream=stream0)
del primals_76
buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512), torch.float32)
buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0); del buf292 # reuse
buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_25, sub_25, add_32, sqrt_25, w_25], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_24.run(buf294, buf9, buf295, 512, 4608, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_25], Original ATen: [aten.convolution]
buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf297 = buf287; del buf287 # reuse
buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_25], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_25.run(buf296, buf297, buf298, buf300, 128, 1024, grid=grid(128), stream=stream0)
buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_25, y_29], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_26.run(buf296, buf297, buf298, primals_78, primals_79, buf301, 131072, grid=grid(131072), stream=stream0)
del primals_79
buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048), torch.float32)
buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0); del buf303 # reuse
buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_26, sub_26, add_33, sqrt_26, w_26], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_27.run(buf305, primals_80, buf306, 2048, 512, grid=grid(2048), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_26], Original ATen: [aten.convolution]
buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf308 = buf298; del buf298 # reuse
buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_30], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_28.run(buf307, buf308, buf309, buf311, 128, 4096, grid=grid(128), stream=stream0)
buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384, 2048), torch.float32)
# Topologically Sorted Source Nodes: [y_30, add_34, y_31], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_31.run(buf279, buf307, buf308, buf309, primals_81, primals_82, buf312, 524288, grid=grid(524288), stream=stream0)
del primals_82
buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096, 4096), 0); del buf34 # reuse
buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf314 # reuse
buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_27, sub_27, add_35, sqrt_27, w_27], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_32.run(buf316, primals_83, buf317, 4096, 2048, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [residual_4], Original ATen: [aten.convolution]
buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32)
buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32)
buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384), torch.float32)
# Topologically Sorted Source Nodes: [residual_5], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_33.run(buf318, buf319, buf320, buf322, 16384, 16, grid=grid(16384), stream=stream0)
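    # Final stage transition: a stride-2, 1x1 projection maps the 2048-channel 8x8 features
    # to 4096 channels at 4x4 (buf318). Note that buf314 above reinterprets the retired
    # buf34 storage (4 x 1024 floats) as the (4096, 1, 1, 1) scratch needed to standardize
    # the 4096-filter projection weight — another instance of storage reuse rather than a
    # fresh allocation.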
buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf324 # reuse
buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048, 2048), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_28, sub_28, add_36, sqrt_28, w_28], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_34.run(buf326, primals_86, buf327, 1024, 2048, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_28], Original ATen: [aten.convolution]
buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf329 = buf309; del buf309 # reuse
buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_28], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_35.run(buf328, buf329, buf330, buf332, 128, 2048, grid=grid(128), stream=stream0)
buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_28, y_32], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_36.run(buf328, buf329, buf330, primals_87, primals_88, buf333, 262144, grid=grid(262144), stream=stream0)
del primals_88
buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf335 # reuse
buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_29, sub_29, add_37, sqrt_29, w_29], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf337, buf10, buf338, 1024, 9216, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_29], Original ATen: [aten.convolution]
buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf340 = buf330; del buf330 # reuse
buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_29], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf339, buf340, buf341, buf343, 128, 512, grid=grid(128), stream=stream0)
buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_29, y_33], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf339, buf340, buf341, primals_90, primals_91, buf344, 65536, grid=grid(65536), stream=stream0)
del primals_91
buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf346 # reuse
buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_30, sub_30, add_38, sqrt_30, w_30], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf348, primals_92, buf349, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_30], Original ATen: [aten.convolution]
buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf351 = buf341; del buf341 # reuse
buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_34], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf350, buf351, buf352, buf354, 128, 2048, grid=grid(128), stream=stream0)
buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
buf356 = buf355; del buf355 # reuse
# Topologically Sorted Source Nodes: [residual_5, y_34, add_39, y_35], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_42.run(buf356, buf318, buf319, buf320, primals_84, primals_85, buf350, buf351, buf352, primals_93, primals_94, 262144, grid=grid(262144), stream=stream0)
del buf320
del primals_85
del primals_94
buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf358 # reuse
buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_31, sub_31, add_40, sqrt_31, w_31], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf360, primals_95, buf361, 1024, 4096, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_31], Original ATen: [aten.convolution]
buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf363 = buf352; del buf352 # reuse
buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_31], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf362, buf363, buf364, buf366, 128, 512, grid=grid(128), stream=stream0)
buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_31, y_36], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf362, buf363, buf364, primals_96, primals_97, buf367, 65536, grid=grid(65536), stream=stream0)
del primals_97
buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf369 # reuse
buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_32, sub_32, add_41, sqrt_32, w_32], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf371, buf11, buf372, 1024, 9216, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_32], Original ATen: [aten.convolution]
buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf374 = buf364; del buf364 # reuse
buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_32], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf373, buf374, buf375, buf377, 128, 512, grid=grid(128), stream=stream0)
buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_32, y_37], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf373, buf374, buf375, primals_99, primals_100, buf378, 65536, grid=grid(65536), stream=stream0)
del primals_100
buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf380 # reuse
buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_33, sub_33, add_42, sqrt_33, w_33], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf382, primals_101, buf383, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_33], Original ATen: [aten.convolution]
buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf385 = buf375; del buf375 # reuse
buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_38], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf384, buf385, buf386, buf388, 128, 2048, grid=grid(128), stream=stream0)
buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [y_38, add_43, y_39], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_44.run(buf356, buf384, buf385, buf386, primals_102, primals_103, buf389, 262144, grid=grid(262144), stream=stream0)
del primals_103
buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf391 # reuse
buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_34, sub_34, add_44, sqrt_34, w_34], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf393, primals_104, buf394, 1024, 4096, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_34], Original ATen: [aten.convolution]
buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf396 = buf386; del buf386 # reuse
buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_34], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf395, buf396, buf397, buf399, 128, 512, grid=grid(128), stream=stream0)
buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_34, y_40], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf395, buf396, buf397, primals_105, primals_106, buf400, 65536, grid=grid(65536), stream=stream0)
del primals_106
buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf402 # reuse
buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_35, sub_35, add_45, sqrt_35, w_35], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf404, buf12, buf405, 1024, 9216, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_35], Original ATen: [aten.convolution]
buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf407 = buf397; del buf397 # reuse
buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_35], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf406, buf407, buf408, buf410, 128, 512, grid=grid(128), stream=stream0)
buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_35, y_41], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf406, buf407, buf408, primals_108, primals_109, buf411, 65536, grid=grid(65536), stream=stream0)
del primals_109
buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf413 # reuse
buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_36, sub_36, add_46, sqrt_36, w_36], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf415, primals_110, buf416, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_36], Original ATen: [aten.convolution]
buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf418 = buf408; del buf408 # reuse
buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf421 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_42], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf417, buf418, buf419, buf421, 128, 2048, grid=grid(128), stream=stream0)
buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.float32)
# Topologically Sorted Source Nodes: [y_42, add_47, y_43], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_44.run(buf389, buf417, buf418, buf419, primals_111, primals_112, buf422, 262144, grid=grid(262144), stream=stream0)
del primals_112
buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf424 # reuse
buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096, 4096), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_37, sub_37, add_48, sqrt_37, w_37], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_43.run(buf426, primals_113, buf427, 1024, 4096, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_37], Original ATen: [aten.convolution]
buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf429 = buf419; del buf419 # reuse
buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_37], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf428, buf429, buf430, buf432, 128, 512, grid=grid(128), stream=stream0)
buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_37, y_44], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf428, buf429, buf430, primals_114, primals_115, buf433, 65536, grid=grid(65536), stream=stream0)
del primals_115
buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024), torch.float32)
buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0); del buf435 # reuse
buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_38, sub_38, add_49, sqrt_38, w_38], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_red_fused_add_div_sqrt_sub_var_mean_37.run(buf437, buf13, buf438, 1024, 9216, grid=grid(1024), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_38], Original ATen: [aten.convolution]
buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf440 = buf430; del buf430 # reuse
buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_38], Original ATen: [aten.native_group_norm]
triton_per_fused_native_group_norm_38.run(buf439, buf440, buf441, buf443, 128, 512, grid=grid(128), stream=stream0)
buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024), torch.float32)
# Topologically Sorted Source Nodes: [group_norm_38, y_45], Original ATen: [aten.native_group_norm, aten.relu]
triton_poi_fused_native_group_norm_relu_39.run(buf439, buf440, buf441, primals_117, primals_118, buf444, 65536, grid=grid(65536), stream=stream0)
del primals_118
buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096), torch.float32)
buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0); del buf446 # reuse
buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024, 1024), torch.float32)
# Topologically Sorted Source Nodes: [var_mean_39, sub_39, add_50, sqrt_39, w_39], Original ATen: [aten.var_mean, aten.sub, aten.add, aten.sqrt, aten.div]
triton_per_fused_add_div_sqrt_sub_var_mean_40.run(buf448, primals_119, buf449, 4096, 1024, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_39], Original ATen: [aten.convolution]
buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf451 = buf441; del buf441 # reuse
buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.float32)
# Topologically Sorted Source Nodes: [y_46], Original ATen: [aten.native_group_norm]
triton_red_fused_native_group_norm_41.run(buf450, buf451, buf452, buf454, 128, 2048, grid=grid(128), stream=stream0)
buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_46, add_51, y_47], Original ATen: [aten.native_group_norm, aten.add, aten.relu]
triton_poi_fused_add_native_group_norm_relu_45.run(buf422, buf450, buf451, buf452, primals_120, primals_121, buf455, 64, 4096, grid=grid(64, 4096), stream=stream0)
del buf452
del primals_121
buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096), torch.bool)
# Topologically Sorted Source Nodes: [], Original ATen: [aten.threshold_backward]
triton_poi_fused_threshold_backward_46.run(buf455, buf456, 16384, 16, grid=grid(16384, 16), stream=stream0)
return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8, primals_9, buf2, primals_12, primals_14, primals_15, primals_17, primals_18, buf3, primals_21, primals_23, primals_24, primals_26, primals_27, buf4, primals_30, primals_32, primals_33, primals_35, primals_36, buf5, primals_39, primals_41, primals_42, primals_44, primals_45, primals_47, primals_48, buf6, primals_51, primals_53, primals_54, primals_56, primals_57, buf7, primals_60, primals_62, primals_63, primals_65, primals_66, buf8, primals_69, primals_71, primals_72, primals_74, primals_75, buf9, primals_78, primals_80, primals_81, primals_83, primals_84, primals_86, primals_87, buf10, primals_90, primals_92, primals_93, primals_95, primals_96, buf11, primals_99, primals_101, primals_102, primals_104, primals_105, buf12, primals_108, primals_110, primals_111, primals_113, primals_114, buf13, primals_117, primals_119, primals_120, buf17, buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0), reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26, buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1), 0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40, buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0), reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52, buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0), reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63, buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0), reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75, buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0), reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86, buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0), reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97, buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0), reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107, buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0), reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118, buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0), reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129, buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0), reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140, buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0), reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151, buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0), reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162, buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0), reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173, buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0), reinterpret_tensor(buf179, (4, 2048), (2048, 1), 0), buf183, buf184, buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0), reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194, buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0), reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205, buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0), reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217, buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0), reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228, buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0), reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239, buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 
0), reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250, buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0), reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261, buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0), reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272, buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0), reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283, buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0), reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294, buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0), reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305, buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0), reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316, buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0), reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327, buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0), reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337, buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0), reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348, buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0), reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360, buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0), reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371, buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0), reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382, buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0), reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393, buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0), reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404, buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0), reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415, buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0), reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426, buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0), reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437, buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0), reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448, buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0), reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 3, 7, 7), (147, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((256, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((1024, 256, 1, 1), (256, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((2048, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((512, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_64 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_65 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_66 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_67 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_68 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_69 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_70 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_71 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_72 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_73 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_74 = rand_strided((512, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_75 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_76 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_77 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_78 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_79 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_80 = rand_strided((2048, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_81 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_82 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_83 = rand_strided((4096, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_84 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_85 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_86 = rand_strided((1024, 2048, 1, 1), (2048, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_87 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_88 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_89 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_90 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_91 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_92 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_93 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_94 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_95 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_96 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_97 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_98 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_99 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_100 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_101 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_102 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_103 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_104 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_105 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_106 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_107 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_108 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_109 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_110 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_111 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_112 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_113 = rand_strided((1024, 4096, 1, 1), (4096, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_114 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_115 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_116 = rand_strided((1024, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_117 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_118 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_119 = rand_strided((4096, 1024, 1, 1), (1024, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_120 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_121 = rand_strided((4096, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63, primals_64, primals_65, primals_66, primals_67, primals_68, primals_69, primals_70, primals_71, primals_72, primals_73, primals_74, primals_75, primals_76, primals_77, primals_78, primals_79, primals_80, primals_81, primals_82, primals_83, primals_84, primals_85, primals_86, primals_87, primals_88, primals_89, primals_90, primals_91, primals_92, primals_93, primals_94, primals_95, primals_96, primals_97, primals_98, primals_99, primals_100, primals_101, primals_102, primals_103, primals_104, primals_105, primals_106, primals_107, primals_108, primals_109, primals_110, primals_111, primals_112, primals_113, primals_114, primals_115, primals_116, primals_117, primals_118, primals_119, primals_120, primals_121])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
from os.path import join as pjoin  # used by PreActBottleneck.load_from to build checkpoint keys
def conv1x1(cin, cout, stride=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0, bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
    return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1, bias=bias, groups=groups)
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
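# Minimal usage sketch (illustrative; `_demo_np2th` and the local NumPy import
# are assumptions, not part of the original module): with conv=True, np2th
# converts a TF-style HWIO kernel into PyTorch's OIHW layout.
def _demo_np2th():
    import numpy as np
    hwio = np.zeros((7, 7, 3, 256), dtype=np.float32)  # (H, W, in_ch, out_ch)
    oihw = np2th(hwio, conv=True)
    assert tuple(oihw.shape) == (256, 3, 7, 7)
    return oihw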
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
        return F.conv2d(x, w, self.bias, self.stride, self.padding, self.dilation, self.groups)
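# Sketch (illustrative; `_demo_std_conv` is an assumption, not part of the
# original file): StdConv2d standardizes each output filter over its
# (in_ch, kH, kW) elements at forward time, which is what the fused
# var_mean/sub/add/sqrt/div kernels in the compiled module appear to implement.
def _demo_std_conv():
    conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
    x = torch.randn(1, 3, 16, 16)
    y = conv(x)
    # Reference: standardize the weight by hand and run a plain conv2d.
    v, m = torch.var_mean(conv.weight, dim=[1, 2, 3], keepdim=True, unbiased=False)
    w_std = (conv.weight - m) / torch.sqrt(v + 1e-05)
    y_ref = F.conv2d(x, w_std, None, conv.stride, conv.padding, conv.dilation, conv.groups)
    assert torch.allclose(y, y_ref)
    return y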
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
def load_from(self, weights, n_block, n_unit):
        conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')], conv=True)
        conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')], conv=True)
        conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
            proj_conv_weight = np2th(weights[pjoin(n_block, n_unit, 'conv_proj/kernel')], conv=True)
            proj_gn_weight = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/scale')])
            proj_gn_bias = np2th(weights[pjoin(n_block, n_unit, 'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
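# Sketch (illustrative; `_demo_preact_bottleneck` is an assumption): a strided
# block halves the spatial size and can change the channel count, with the
# residual matched through the 1x1 downsample conv followed by gn_proj.
def _demo_preact_bottleneck():
    block = PreActBottleneck(cin=64, cout=128, cmid=32, stride=2)
    x = torch.randn(2, 64, 16, 16)
    y = block(x)
    assert tuple(y.shape) == (2, 128, 8, 8)
    return y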
class ResNetV2(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width = width
        self.root = nn.Sequential(OrderedDict([
            ('conv', StdConv2d(3, width, kernel_size=7, stride=2, bias=False, padding=3)),
            ('gn', nn.GroupNorm(32, width, eps=1e-06)),
            ('relu', nn.ReLU(inplace=True)),
            ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0)),
        ]))
        self.body = nn.Sequential(OrderedDict([
            ('block1', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width, cout=width * 4, cmid=width))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 4, cout=width * 4, cmid=width))
                 for i in range(2, block_units[0] + 1)]))),
            ('block2', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 8, cout=width * 8, cmid=width * 2))
                 for i in range(2, block_units[1] + 1)]))),
            ('block3', nn.Sequential(OrderedDict(
                [('unit1', PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4, stride=2))] +
                [(f'unit{i:d}', PreActBottleneck(cin=width * 16, cout=width * 16, cmid=width * 4))
                 for i in range(2, block_units[2] + 1)]))),
        ]))
def forward(self, x):
x = self.root(x)
x = self.body(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {'block_units': [4, 4, 4], 'width_factor': 4}]
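# Usage sketch (illustrative; `_demo_resnetv2` is an assumption): with
# block_units=[4, 4, 4] and width_factor=4 the backbone maps a (4, 3, 64, 64)
# batch to a (4, 4096, 4, 4) feature map, matching the shape of the first
# buffer returned by the compiled call() above.
def _demo_resnetv2():
    args, kwargs = get_init_inputs()
    model = ResNetV2(*args, **kwargs)
    x, = get_inputs()
    with torch.no_grad():
        y = model(x)
    assert tuple(y.shape) == (4, 4096, 4, 4)
    return y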
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 768
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 147 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 12
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 3
y1 = yindex // 3
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 256
y1 = yindex // 256
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 512 * x2 + 4608 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 9
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 1024
y1 = yindex // 1024
tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last'
)
tl.store(out_ptr0 + (y0 + 1024 * x2 + 9216 * y1), tmp0, xmask)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_5(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 256
rnumel = 147
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 147 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask & xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 147, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask & xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 147.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.sqrt(tmp20)
tmp22 = tmp0 - tmp10
tmp23 = tmp22 / tmp21
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp21, xmask)
tl.store(out_ptr1 + (r1 + 147 * x0), tmp23, rmask & xmask)
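# Sketch (illustrative; `_demo_weight_standardization` is an assumption): the
# fused var_mean/sub/add/sqrt/div kernel above corresponds to eager-mode weight
# standardization of the 256x3x7x7 root conv kernel (147 elements per filter,
# eps=1e-05).
def _demo_weight_standardization():
    w = torch.randn(256, 3, 7, 7)
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    w_std = (w - m) / torch.sqrt(v + 1e-05)
    assert w_std.shape == w.shape
    return w_std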
@triton.jit
def triton_red_fused_native_group_norm_6(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 262144 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 8192.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 256
x2 = xindex // 262144
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 8192.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
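# Sketch (illustrative; `_demo_groupnorm_relu` is an assumption): the fused
# kernel above corresponds to GroupNorm(32, 256, eps=1e-06) followed by ReLU
# applied to the (4, 256, 32, 32) root-conv activations.
def _demo_groupnorm_relu():
    gn = nn.GroupNorm(32, 256, eps=1e-06)
    x = torch.randn(4, 256, 32, 32)
    y = F.relu(gn(x))
    assert y.shape == x.shape
    return y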
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 256
x1 = xindex // 256 % 15
x2 = xindex // 3840 % 15
x3 = xindex // 57600
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 16384 * x2 + 262144 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp3 = tl.load(in_ptr0 + (512 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp5 = tl.load(in_ptr0 + (8192 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp7 = tl.load(in_ptr0 + (8448 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp9 = tl.load(in_ptr0 + (8704 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp11 = tl.load(in_ptr0 + (16384 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp13 = tl.load(in_ptr0 + (16640 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp15 = tl.load(in_ptr0 + (16896 + x0 + 512 * x1 + 16384 * x2 + 262144 *
x3), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x4, tmp16, xmask)
tl.store(out_ptr1 + x4, tmp41, xmask)
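# Sketch (illustrative; `_demo_root_maxpool` is an assumption): the fused
# kernel above corresponds to a 3x3 / stride-2 max pool (no padding) over the
# 256-channel 32x32 root features; the int8 value it stores records which of
# the nine window positions was the max, while eager PyTorch exposes the
# analogous information as flat indices via return_indices=True.
def _demo_root_maxpool():
    feat = torch.randn(4, 256, 32, 32)
    pooled, idx = F.max_pool2d(feat, kernel_size=3, stride=2, padding=0,
                               return_indices=True)
    assert tuple(pooled.shape) == (4, 256, 15, 15)
    return pooled, idx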
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_9(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_per_fused_native_group_norm_10(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
rnumel = 225
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 1024
x1 = xindex // 1024
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 1024 * r2 + 230400 * x1), rmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(rmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(rmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 225, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(rmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 225.0
tmp18 = tmp16 / tmp17
tmp19 = 1e-05
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x3, tmp21, None)
tl.store(out_ptr0 + x3, tmp10, None)
tl.store(out_ptr1 + x3, tmp16, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_11(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 256 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 256, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 256 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_12(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 1800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 8
r3 = rindex // 8
tmp0 = tl.load(in_ptr0 + (r2 + 8 * x0 + 256 * r3 + 57600 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 1800.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_13(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 230400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 256
x2 = xindex // 57600
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 8), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 8), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1800.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_14(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 256
rnumel = 2304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2304.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2304 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2304 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_red_fused_native_group_norm_15(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 7200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 230400 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 7200.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_16(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 230400
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 1024 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 1024 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 225.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 7200.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_17(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_18(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 230400
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 7200.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_19(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_per_fused_native_group_norm_20(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 2048
x1 = xindex // 2048
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r2 + 131072 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 64.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x3, tmp18, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp13, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_21(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_22(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 3600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 16
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 115200 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 3600.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
@triton.jit
def triton_poi_fused_native_group_norm_relu_23(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 115200
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 3600.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_24(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4608.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4608 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4608 * x0), tmp12, rmask & xmask)
@triton.jit
def triton_per_fused_native_group_norm_25(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 16
r3 = rindex // 16
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 16 * x0 + 512 * r3 + 32768 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
@triton.jit
def triton_poi_fused_native_group_norm_relu_26(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 512
x2 = xindex // 32768
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 16), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 1024.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_27(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 512 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 512 * x0), tmp20, None)
@triton.jit
def triton_red_fused_native_group_norm_28(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 64
r3 = rindex // 64
tmp0 = tl.load(in_ptr0 + (r2 + 64 * x0 + 2048 * r3 + 131072 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
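# Fused residual join: normalizes one input with per-channel statistics (64 elements, eps 1e-05)
# and the other with per-group statistics (4096 elements, eps 1e-06), applies both affines,
# adds the two branches, and applies ReLU in place.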
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_29(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 2048 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 2048 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 64.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 4096.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
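# Two-pass standardization of 512 rows of 2048 elements each: Welford mean/variance,
# store sqrt(var + 1e-05) per row, then write (x - mean) / std in a second pass.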
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_30(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 512
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
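# Residual add + ReLU: group-normalizes the second input (4096 elements per group, eps 1e-06),
# applies the per-channel affine, adds the first input, and clamps at zero.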
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_31(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 2048
x2 = xindex // 131072
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 64), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 4096.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
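# Same two-pass row standardization as above, specialized for a launch where every program
# is fully in bounds, so no xmask is applied.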
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_32(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, None)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask, eviction_policy=
'evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask)
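# Per-channel statistics over 16 spatial positions of a 4096-channel activation:
# mean, sum of squared deviations, and rsqrt(var + 1e-05).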
@triton.jit
def triton_per_fused_native_group_norm_33(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x0 = xindex % 4096
x1 = xindex // 4096
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4096 * r2 + 65536 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK])
tmp13 = tl.sum(tmp11, 1)[:, None]
tmp14 = 16.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x3, tmp18, None)
tl.store(out_ptr0 + x3, tmp8, None)
tl.store(out_ptr1 + x3, tmp13, None)
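# Two-pass standardization of 1024 rows of 2048 elements each (eps 1e-05), storing the per-row
# std and the normalized values.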
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_34(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 2048 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 2048 * x0), tmp12, rmask & xmask)
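# Welford GroupNorm statistics over 2048 elements per (batch, group) (32 channels x 64 positions):
# mean, sum of squared deviations, and rsqrt(var + 1e-06).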
@triton.jit
def triton_red_fused_native_group_norm_35(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 32
r3 = rindex // 32
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
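# GroupNorm + ReLU over 1024 channels (32 groups of 32 channels, 2048 elements per group, eps 1e-06)
# with per-channel affine.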
@triton.jit
def triton_poi_fused_native_group_norm_relu_36(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 2048.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
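# Two-pass standardization of 1024 filters with 9216 elements each (1024 x 3 x 3 kernels, eps 1e-05).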
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_37(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 9216.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 9216 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 9216 * x0), tmp12, rmask & xmask)
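# Single-pass per-(batch, group) statistics over 512 elements (32 channels x 16 positions):
# mean, sum of squared deviations, and rsqrt(var + 1e-06).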
@triton.jit
def triton_per_fused_native_group_norm_38(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex % 32
r3 = rindex // 32
x0 = xindex % 32
x1 = xindex // 32
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 32 * x0 + 1024 * r3 + 16384 * x1), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 512, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 512.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-06
tmp17 = tmp15 + tmp16
tmp18 = libdevice.rsqrt(tmp17)
tl.store(out_ptr2 + x4, tmp18, None)
tl.store(out_ptr0 + x4, tmp8, None)
tl.store(out_ptr1 + x4, tmp13, None)
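# GroupNorm + ReLU over 1024 channels (32 groups of 32 channels, 512 elements per group, eps 1e-06)
# with per-channel affine.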
@triton.jit
def triton_poi_fused_native_group_norm_relu_39(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 1024
x2 = xindex // 16384
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (32 * x2 + x0 // 32), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 512.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-06
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tl.store(out_ptr0 + x3, tmp15, None)
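# Single-pass standardization of rows with 1024 elements each: stores sqrt(var + 1e-05) and the
# normalized values.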
@triton.jit
def triton_per_fused_add_div_sqrt_sub_var_mean_40(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 1024 * x0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = tl.broadcast_to(tmp1, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.full([1], 1024, tl.int32)
tmp7 = tmp6.to(tl.float32)
tmp8 = tmp5 / tmp7
tmp9 = tmp1 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 1024.0
tmp15 = tmp13 / tmp14
tmp16 = 1e-05
tmp17 = tmp15 + tmp16
tmp18 = libdevice.sqrt(tmp17)
tmp19 = tmp0 - tmp8
tmp20 = tmp19 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp18, None)
tl.store(out_ptr1 + (r1 + 1024 * x0), tmp20, None)
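# Welford GroupNorm statistics over 2048 elements per (batch, group) (128 channels x 16 positions):
# mean, sum of squared deviations, and rsqrt(var + 1e-06).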
@triton.jit
def triton_red_fused_native_group_norm_41(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex % 32
x1 = xindex // 32
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
x4 = xindex
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex % 128
r3 = rindex // 128
tmp0 = tl.load(in_ptr0 + (r2 + 128 * x0 + 4096 * r3 + 65536 * x1),
rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tl.store(out_ptr0 + x4, tmp2, xmask)
tl.store(out_ptr1 + x4, tmp3, xmask)
tmp5 = 2048.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tl.store(out_ptr2 + x4, tmp9, xmask)
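# Fused residual join: per-channel normalization of one input (16 elements, eps 1e-05) and
# per-group normalization of the other (2048 elements, eps 1e-06), both affines applied,
# branches added, then ReLU in place.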
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_42(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + 4096 * x2), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr3 + x0, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr5 + x3, None)
tmp15 = tl.load(in_ptr6 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr7 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp24 = tl.load(in_ptr8 + x0, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp16 = tmp14 - tmp15
tmp18 = 2048.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-06
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp16 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tmp13 + tmp27
tmp29 = tl.full([1], 0, tl.int32)
tmp30 = triton_helpers.maximum(tmp29, tmp28)
tl.store(in_out_ptr0 + x3, tmp30, None)
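# Two-pass standardization of 1024 rows of 4096 elements each (eps 1e-05).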
@triton.jit
def triton_red_fused_add_div_sqrt_sub_var_mean_43(in_out_ptr0, in_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
xnumel = 1024
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
tmp2_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp2_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_last', other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp2_mean_next, tmp2_m2_next, tmp2_weight_next = (triton_helpers.
welford_reduce(tmp1, tmp2_mean, tmp2_m2, tmp2_weight, roffset == 0)
)
tmp2_mean = tl.where(rmask & xmask, tmp2_mean_next, tmp2_mean)
tmp2_m2 = tl.where(rmask & xmask, tmp2_m2_next, tmp2_m2)
tmp2_weight = tl.where(rmask & xmask, tmp2_weight_next, tmp2_weight)
tmp2_tmp, tmp3_tmp, tmp4_tmp = triton_helpers.welford(tmp2_mean,
tmp2_m2, tmp2_weight, 1)
tmp2 = tmp2_tmp[:, None]
tmp3 = tmp3_tmp[:, None]
tmp4_tmp[:, None]
tmp5 = 4096.0
tmp6 = tmp3 / tmp5
tmp7 = 1e-05
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp9, xmask)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp10 = tl.load(in_ptr0 + (r1 + 4096 * x0), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp11 = tmp10 - tmp2
tmp12 = tmp11 / tmp9
tl.store(out_ptr1 + (r1 + 4096 * x0), tmp12, rmask & xmask)
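# Residual add + ReLU: group-normalizes the second input (2048 elements per group, eps 1e-06),
# applies the per-channel affine, adds the first input, and clamps at zero.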
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_44(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x0 = xindex % 4096
x2 = xindex // 65536
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x3, None)
tmp2 = tl.load(in_ptr2 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * x2 + x0 // 128), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + x3, tmp17, None)
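# Same fused residual add + GroupNorm + ReLU as above, tiled in two dimensions so the result is
# written back in a permuted memory layout.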
@triton.jit
def triton_poi_fused_add_native_group_norm_relu_45(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK:
tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y1 = yindex // 16
y0 = yindex % 16
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + (32 * y1 + x2 // 128), ymask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + (32 * y1 + x2 // 128), ymask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp3 = tmp1 - tmp2
tmp5 = 2048.0
tmp6 = tmp4 / tmp5
tmp7 = 1e-06
tmp8 = tmp6 + tmp7
tmp9 = libdevice.rsqrt(tmp8)
tmp10 = tmp3 * tmp9
tmp12 = tmp10 * tmp11
tmp14 = tmp12 + tmp13
tmp15 = tmp0 + tmp14
tmp16 = tl.full([1, 1], 0, tl.int32)
tmp17 = triton_helpers.maximum(tmp16, tmp15)
tl.store(out_ptr0 + (y0 + 16 * x2 + 65536 * y1), tmp17, ymask)
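# ReLU backward mask: records where the activation is <= 0 (threshold_backward), permuting the
# layout of the mask as it is written.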
@triton.jit
def triton_poi_fused_threshold_backward_46(in_ptr0, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4096
y1 = yindex // 4096
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tmp1 = 0.0
tmp2 = tmp0 <= tmp1
tl.store(out_ptr0 + (y0 + 4096 * x2 + 65536 * y1), tmp2, xmask)
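# Entry point for the compiled graph: unpacks and shape/stride-checks the 121 parameters, then runs
# the forward pass on CUDA device 0 as a sequence of weight-standardization kernels, extern
# convolutions, GroupNorm statistic/normalization kernels, and fused residual-add + ReLU kernels.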
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62,
primals_63, primals_64, primals_65, primals_66, primals_67,
primals_68, primals_69, primals_70, primals_71, primals_72,
primals_73, primals_74, primals_75, primals_76, primals_77,
primals_78, primals_79, primals_80, primals_81, primals_82,
primals_83, primals_84, primals_85, primals_86, primals_87,
primals_88, primals_89, primals_90, primals_91, primals_92,
primals_93, primals_94, primals_95, primals_96, primals_97,
primals_98, primals_99, primals_100, primals_101, primals_102,
primals_103, primals_104, primals_105, primals_106, primals_107,
primals_108, primals_109, primals_110, primals_111, primals_112,
primals_113, primals_114, primals_115, primals_116, primals_117,
primals_118, primals_119, primals_120, primals_121) = args
args.clear()
assert_size_stride(primals_1, (256, 3, 7, 7), (147, 49, 7, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (256,), (1,))
assert_size_stride(primals_4, (256,), (1,))
assert_size_stride(primals_5, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_6, (1024,), (1,))
assert_size_stride(primals_7, (1024,), (1,))
assert_size_stride(primals_8, (256, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (256,), (1,))
assert_size_stride(primals_11, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_12, (256,), (1,))
assert_size_stride(primals_13, (256,), (1,))
assert_size_stride(primals_14, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (1024,), (1,))
assert_size_stride(primals_17, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_18, (256,), (1,))
assert_size_stride(primals_19, (256,), (1,))
assert_size_stride(primals_20, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_21, (256,), (1,))
assert_size_stride(primals_22, (256,), (1,))
assert_size_stride(primals_23, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_24, (1024,), (1,))
assert_size_stride(primals_25, (1024,), (1,))
assert_size_stride(primals_26, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_27, (256,), (1,))
assert_size_stride(primals_28, (256,), (1,))
assert_size_stride(primals_29, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_30, (256,), (1,))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_33, (1024,), (1,))
assert_size_stride(primals_34, (1024,), (1,))
assert_size_stride(primals_35, (256, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_36, (256,), (1,))
assert_size_stride(primals_37, (256,), (1,))
assert_size_stride(primals_38, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_39, (256,), (1,))
assert_size_stride(primals_40, (256,), (1,))
assert_size_stride(primals_41, (1024, 256, 1, 1), (256, 1, 1, 1))
assert_size_stride(primals_42, (1024,), (1,))
assert_size_stride(primals_43, (1024,), (1,))
assert_size_stride(primals_44, (2048, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_45, (2048,), (1,))
assert_size_stride(primals_46, (2048,), (1,))
assert_size_stride(primals_47, (512, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_48, (512,), (1,))
assert_size_stride(primals_49, (512,), (1,))
assert_size_stride(primals_50, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_51, (512,), (1,))
assert_size_stride(primals_52, (512,), (1,))
assert_size_stride(primals_53, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_54, (2048,), (1,))
assert_size_stride(primals_55, (2048,), (1,))
assert_size_stride(primals_56, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_57, (512,), (1,))
assert_size_stride(primals_58, (512,), (1,))
assert_size_stride(primals_59, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_60, (512,), (1,))
assert_size_stride(primals_61, (512,), (1,))
assert_size_stride(primals_62, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_63, (2048,), (1,))
assert_size_stride(primals_64, (2048,), (1,))
assert_size_stride(primals_65, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_66, (512,), (1,))
assert_size_stride(primals_67, (512,), (1,))
assert_size_stride(primals_68, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_69, (512,), (1,))
assert_size_stride(primals_70, (512,), (1,))
assert_size_stride(primals_71, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_72, (2048,), (1,))
assert_size_stride(primals_73, (2048,), (1,))
assert_size_stride(primals_74, (512, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_75, (512,), (1,))
assert_size_stride(primals_76, (512,), (1,))
assert_size_stride(primals_77, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_78, (512,), (1,))
assert_size_stride(primals_79, (512,), (1,))
assert_size_stride(primals_80, (2048, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_81, (2048,), (1,))
assert_size_stride(primals_82, (2048,), (1,))
assert_size_stride(primals_83, (4096, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_84, (4096,), (1,))
assert_size_stride(primals_85, (4096,), (1,))
assert_size_stride(primals_86, (1024, 2048, 1, 1), (2048, 1, 1, 1))
assert_size_stride(primals_87, (1024,), (1,))
assert_size_stride(primals_88, (1024,), (1,))
assert_size_stride(primals_89, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_90, (1024,), (1,))
assert_size_stride(primals_91, (1024,), (1,))
assert_size_stride(primals_92, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_93, (4096,), (1,))
assert_size_stride(primals_94, (4096,), (1,))
assert_size_stride(primals_95, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_96, (1024,), (1,))
assert_size_stride(primals_97, (1024,), (1,))
assert_size_stride(primals_98, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_99, (1024,), (1,))
assert_size_stride(primals_100, (1024,), (1,))
assert_size_stride(primals_101, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_102, (4096,), (1,))
assert_size_stride(primals_103, (4096,), (1,))
assert_size_stride(primals_104, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_105, (1024,), (1,))
assert_size_stride(primals_106, (1024,), (1,))
assert_size_stride(primals_107, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_108, (1024,), (1,))
assert_size_stride(primals_109, (1024,), (1,))
assert_size_stride(primals_110, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_111, (4096,), (1,))
assert_size_stride(primals_112, (4096,), (1,))
assert_size_stride(primals_113, (1024, 4096, 1, 1), (4096, 1, 1, 1))
assert_size_stride(primals_114, (1024,), (1,))
assert_size_stride(primals_115, (1024,), (1,))
assert_size_stride(primals_116, (1024, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_117, (1024,), (1,))
assert_size_stride(primals_118, (1024,), (1,))
assert_size_stride(primals_119, (4096, 1024, 1, 1), (1024, 1, 1, 1))
assert_size_stride(primals_120, (4096,), (1,))
assert_size_stride(primals_121, (4096,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
get_raw_stream(0)
triton_poi_fused_0[grid(768, 49)](primals_1, buf0, 768, 49, XBLOCK=
32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch
.float32)
triton_poi_fused_1[grid(12, 4096)](primals_2, buf1, 12, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_11, buf2, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_11
buf3 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_20, buf3, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_20
buf4 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_29, buf4, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_29
buf5 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_poi_fused_2[grid(65536, 9)](primals_38, buf5, 65536, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_38
buf6 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_50, buf6, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_50
buf7 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_59, buf7, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_59
buf8 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_68, buf8, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_68
buf9 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_poi_fused_3[grid(262144, 9)](primals_77, buf9, 262144, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_77
buf10 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_89, buf10, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_89
buf11 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_98, buf11, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_98
buf12 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_107, buf12, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_107
buf13 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072, 1024
), torch.float32)
triton_poi_fused_4[grid(1048576, 9)](primals_116, buf13, 1048576, 9,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_116
buf15 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf17 = reinterpret_tensor(buf15, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf15
buf18 = empty_strided_cuda((256, 3, 7, 7), (147, 1, 21, 3), torch.
float32)
triton_per_fused_add_div_sqrt_sub_var_mean_5[grid(256)](buf17, buf0,
buf18, 256, 147, XBLOCK=1, num_warps=2, num_stages=1)
buf19 = extern_kernels.convolution(buf1, buf18, stride=(2, 2),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 256, 32, 32), (262144, 1, 8192, 256))
buf20 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf21 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf23 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_6[grid(128)](buf19, buf20, buf21,
buf23, 128, 8192, XBLOCK=1, RBLOCK=2048, num_warps=16, num_stages=1
)
buf24 = empty_strided_cuda((4, 256, 32, 32), (262144, 1, 8192, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_7[grid(1048576)](buf19,
buf20, buf21, primals_3, primals_4, buf24, 1048576, XBLOCK=512,
num_warps=8, num_stages=1)
del primals_4
buf25 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
buf26 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_8[grid(230400)](buf24,
buf25, buf26, 230400, XBLOCK=512, num_warps=8, num_stages=1)
buf28 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf30 = reinterpret_tensor(buf28, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf28
buf31 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf30,
primals_5, buf31, 1024, 256, num_warps=2, num_stages=1)
buf32 = extern_kernels.convolution(buf25, buf31, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf32, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf33 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
buf34 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
buf36 = empty_strided_cuda((4, 1024, 1, 1), (1024, 1, 4096, 4096),
torch.float32)
triton_per_fused_native_group_norm_10[grid(4096)](buf32, buf33,
buf34, buf36, 4096, 225, XBLOCK=1, num_warps=2, num_stages=1)
buf38 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf40 = reinterpret_tensor(buf38, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf38
buf41 = empty_strided_cuda((256, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_11[grid(256)](buf40,
primals_8, buf41, 256, 256, num_warps=2, num_stages=1)
buf42 = extern_kernels.convolution(buf25, buf41, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf42, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf43 = buf21
del buf21
buf44 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf46 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf42, buf43,
buf44, buf46, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf47 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf42,
buf43, buf44, primals_9, primals_10, buf47, 230400, XBLOCK=1024,
num_warps=4, num_stages=1)
del primals_10
buf49 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf51 = reinterpret_tensor(buf49, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf49
buf52 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf51,
buf2, buf52, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf53 = extern_kernels.convolution(buf47, buf52, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf53, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf54 = buf44
del buf44
buf55 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf57 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf53, buf54,
buf55, buf57, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf58 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf53,
buf54, buf55, primals_12, primals_13, buf58, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_13
buf60 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf62 = reinterpret_tensor(buf60, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf60
buf63 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf62,
primals_14, buf63, 1024, 256, num_warps=2, num_stages=1)
buf64 = extern_kernels.convolution(buf58, buf63, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf64, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf65 = buf55
del buf55
buf66 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf68 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_15[grid(128)](buf64, buf65,
buf66, buf68, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf69 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
buf70 = buf69
del buf69
triton_poi_fused_add_native_group_norm_relu_16[grid(921600)](buf70,
buf32, buf33, buf34, primals_6, primals_7, buf64, buf65, buf66,
primals_15, primals_16, 921600, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_16
del primals_7
buf72 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf74 = reinterpret_tensor(buf72, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf72
buf75 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf74,
primals_17, buf75, 256, 1024, num_warps=8, num_stages=1)
buf76 = extern_kernels.convolution(buf70, buf75, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf76, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf77 = buf66
del buf66
buf78 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf80 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf76, buf77,
buf78, buf80, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf81 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf76,
buf77, buf78, primals_18, primals_19, buf81, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_19
buf83 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf85 = reinterpret_tensor(buf83, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf83
buf86 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf85,
buf3, buf86, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf87 = extern_kernels.convolution(buf81, buf86, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf87, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf88 = buf78
del buf78
buf89 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
buf91 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch.
float32)
triton_red_fused_native_group_norm_12[grid(128)](buf87, buf88,
buf89, buf91, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf92 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf87,
buf88, buf89, primals_21, primals_22, buf92, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_22
buf94 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf96 = reinterpret_tensor(buf94, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf94
buf97 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf96,
primals_23, buf97, 1024, 256, num_warps=2, num_stages=1)
buf98 = extern_kernels.convolution(buf92, buf97, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf98, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf99 = buf89
del buf89
buf100 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf102 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf98, buf99,
buf100, buf102, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf103 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf70,
buf98, buf99, buf100, primals_24, primals_25, buf103, 921600,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_25
buf105 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf107 = reinterpret_tensor(buf105, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf105
buf108 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf107,
primals_26, buf108, 256, 1024, num_warps=8, num_stages=1)
buf109 = extern_kernels.convolution(buf103, buf108, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf109, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf110 = buf100
del buf100
buf111 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf113 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf109, buf110,
buf111, buf113, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf114 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf109,
buf110, buf111, primals_27, primals_28, buf114, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_28
buf116 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf118 = reinterpret_tensor(buf116, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf116
buf119 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf118,
buf4, buf119, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf120 = extern_kernels.convolution(buf114, buf119, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf121 = buf111
del buf111
buf122 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf124 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf120, buf121,
buf122, buf124, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf125 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf120,
buf121, buf122, primals_30, primals_31, buf125, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_31
buf127 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf129 = reinterpret_tensor(buf127, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf127
buf130 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf129,
primals_32, buf130, 1024, 256, num_warps=2, num_stages=1)
buf131 = extern_kernels.convolution(buf125, buf130, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf132 = buf122
del buf122
buf133 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf135 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf131, buf132,
buf133, buf135, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf136 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf103,
buf131, buf132, buf133, primals_33, primals_34, buf136, 921600,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_34
buf138 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf140 = reinterpret_tensor(buf138, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf138
buf141 = empty_strided_cuda((256, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_17[grid(256)](buf140,
primals_35, buf141, 256, 1024, num_warps=8, num_stages=1)
buf142 = extern_kernels.convolution(buf136, buf141, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf142, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf143 = buf133
del buf133
buf144 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf146 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf142, buf143,
buf144, buf146, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf147 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf142,
buf143, buf144, primals_36, primals_37, buf147, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_37
buf149 = empty_strided_cuda((256, 1, 1, 1), (1, 256, 256, 256),
torch.float32)
buf151 = reinterpret_tensor(buf149, (256, 1, 1, 1), (1, 1, 1, 1), 0)
del buf149
buf152 = empty_strided_cuda((256, 256, 3, 3), (2304, 1, 768, 256),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_14[grid(256)](buf151,
buf5, buf152, 256, 2304, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf153 = extern_kernels.convolution(buf147, buf152, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf153, (4, 256, 15, 15), (57600, 1, 3840, 256))
buf154 = buf144
del buf144
buf155 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf157 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_12[grid(128)](buf153, buf154,
buf155, buf157, 128, 1800, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf158 = empty_strided_cuda((4, 256, 15, 15), (57600, 1, 3840, 256),
torch.float32)
triton_poi_fused_native_group_norm_relu_13[grid(230400)](buf153,
buf154, buf155, primals_39, primals_40, buf158, 230400, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_40
buf160 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf162 = reinterpret_tensor(buf160, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf160
buf163 = empty_strided_cuda((1024, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_9[grid(1024)](buf162,
primals_41, buf163, 1024, 256, num_warps=2, num_stages=1)
buf164 = extern_kernels.convolution(buf158, buf163, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf164, (4, 1024, 15, 15), (230400, 1, 15360, 1024))
buf165 = buf155
del buf155
buf166 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf168 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_15[grid(128)](buf164, buf165,
buf166, buf168, 128, 7200, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf169 = empty_strided_cuda((4, 1024, 15, 15), (230400, 1, 15360,
1024), torch.float32)
triton_poi_fused_add_native_group_norm_relu_18[grid(921600)](buf136,
buf164, buf165, buf166, primals_42, primals_43, buf169, 921600,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_43
buf171 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf173 = reinterpret_tensor(buf171, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf171
buf174 = empty_strided_cuda((2048, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_19[grid(2048)](buf173,
primals_44, buf174, 2048, 1024, num_warps=8, num_stages=1)
buf175 = extern_kernels.convolution(buf169, buf174, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf175, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf176 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
buf177 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
buf179 = empty_strided_cuda((4, 2048, 1, 1), (2048, 1, 8192, 8192),
torch.float32)
triton_per_fused_native_group_norm_20[grid(8192)](buf175, buf176,
buf177, buf179, 8192, 64, XBLOCK=8, num_warps=4, num_stages=1)
buf181 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf183 = reinterpret_tensor(buf181, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf181
buf184 = empty_strided_cuda((512, 1024, 1, 1), (1024, 1, 1024, 1024
), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_21[grid(512)](buf183,
primals_47, buf184, 512, 1024, num_warps=8, num_stages=1)
buf185 = extern_kernels.convolution(buf169, buf184, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf185, (4, 512, 15, 15), (115200, 1, 7680, 512))
buf186 = buf166
del buf166
buf187 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf189 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_22[grid(128)](buf185, buf186,
buf187, buf189, 128, 3600, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf190 = empty_strided_cuda((4, 512, 15, 15), (115200, 1, 7680, 512
), torch.float32)
triton_poi_fused_native_group_norm_relu_23[grid(460800)](buf185,
buf186, buf187, primals_48, primals_49, buf190, 460800, XBLOCK=
1024, num_warps=4, num_stages=1)
del primals_49
buf192 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf194 = reinterpret_tensor(buf192, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf192
buf195 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf194,
buf6, buf195, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf196 = extern_kernels.convolution(buf190, buf195, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf196, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf197 = buf187
del buf187
buf198 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf200 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf196, buf197,
buf198, buf200, 128, 1024, num_warps=8, num_stages=1)
buf201 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf196,
buf197, buf198, primals_51, primals_52, buf201, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_52
buf203 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf205 = reinterpret_tensor(buf203, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf203
buf206 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf205,
primals_53, buf206, 2048, 512, num_warps=4, num_stages=1)
buf207 = extern_kernels.convolution(buf201, buf206, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf207, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf208 = buf198
del buf198
buf209 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf211 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf207, buf208,
buf209, buf211, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf212 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
buf213 = buf212
del buf212
triton_poi_fused_add_native_group_norm_relu_29[grid(524288)](buf213,
buf175, buf176, buf177, primals_45, primals_46, buf207, buf208,
buf209, primals_54, primals_55, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf177
del primals_46
del primals_55
buf215 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf217 = reinterpret_tensor(buf215, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf215
buf218 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf217,
primals_56, buf218, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf219 = extern_kernels.convolution(buf213, buf218, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf219, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf220 = buf209
del buf209
buf221 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf223 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf219, buf220,
buf221, buf223, 128, 1024, num_warps=8, num_stages=1)
buf224 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf219,
buf220, buf221, primals_57, primals_58, buf224, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_58
buf226 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf228 = reinterpret_tensor(buf226, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf226
buf229 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf228,
buf7, buf229, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf230 = extern_kernels.convolution(buf224, buf229, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf230, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf231 = buf221
del buf221
buf232 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf234 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf230, buf231,
buf232, buf234, 128, 1024, num_warps=8, num_stages=1)
buf235 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf230,
buf231, buf232, primals_60, primals_61, buf235, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_61
buf237 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf239 = reinterpret_tensor(buf237, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf237
buf240 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf239,
primals_62, buf240, 2048, 512, num_warps=4, num_stages=1)
buf241 = extern_kernels.convolution(buf235, buf240, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf241, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf242 = buf232
del buf232
buf243 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf245 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf241, buf242,
buf243, buf245, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf246 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf213,
buf241, buf242, buf243, primals_63, primals_64, buf246, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_64
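# block2/unit3: primals_65..67 feed conv1/gn1 of body.block2.unit3.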
buf248 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf250 = reinterpret_tensor(buf248, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf248
buf251 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf250,
primals_65, buf251, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf252 = extern_kernels.convolution(buf246, buf251, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf252, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf253 = buf243
del buf243
buf254 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf256 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf252, buf253,
buf254, buf256, 128, 1024, num_warps=8, num_stages=1)
buf257 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf252,
buf253, buf254, primals_66, primals_67, buf257, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_67
buf259 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf261 = reinterpret_tensor(buf259, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf259
buf262 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf261,
buf8, buf262, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf263 = extern_kernels.convolution(buf257, buf262, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf263, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf264 = buf254
del buf254
buf265 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf267 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf263, buf264,
buf265, buf267, 128, 1024, num_warps=8, num_stages=1)
buf268 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf263,
buf264, buf265, primals_69, primals_70, buf268, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_70
buf270 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf272 = reinterpret_tensor(buf270, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf270
buf273 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf272,
primals_71, buf273, 2048, 512, num_warps=4, num_stages=1)
buf274 = extern_kernels.convolution(buf268, buf273, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf274, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf275 = buf265
del buf265
buf276 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf278 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf274, buf275,
buf276, buf278, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf279 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf246,
buf274, buf275, buf276, primals_72, primals_73, buf279, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_73
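# block2/unit4: primals_74..76 feed conv1/gn1 of body.block2.unit4.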
buf281 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf283 = reinterpret_tensor(buf281, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf281
buf284 = empty_strided_cuda((512, 2048, 1, 1), (2048, 1, 2048, 2048
), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_30[grid(512)](buf283,
primals_74, buf284, 512, 2048, XBLOCK=1, RBLOCK=2048, num_warps
=16, num_stages=1)
buf285 = extern_kernels.convolution(buf279, buf284, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf285, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf286 = buf276
del buf276
buf287 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf289 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf285, buf286,
buf287, buf289, 128, 1024, num_warps=8, num_stages=1)
buf290 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf285,
buf286, buf287, primals_75, primals_76, buf290, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_76
buf292 = empty_strided_cuda((512, 1, 1, 1), (1, 512, 512, 512),
torch.float32)
buf294 = reinterpret_tensor(buf292, (512, 1, 1, 1), (1, 1, 1, 1), 0)
del buf292
buf295 = empty_strided_cuda((512, 512, 3, 3), (4608, 1, 1536, 512),
torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_24[grid(512)](buf294,
buf9, buf295, 512, 4608, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf296 = extern_kernels.convolution(buf290, buf295, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf296, (4, 512, 8, 8), (32768, 1, 4096, 512))
buf297 = buf287
del buf287
buf298 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf300 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_25[grid(128)](buf296, buf297,
buf298, buf300, 128, 1024, num_warps=8, num_stages=1)
buf301 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512),
torch.float32)
triton_poi_fused_native_group_norm_relu_26[grid(131072)](buf296,
buf297, buf298, primals_78, primals_79, buf301, 131072, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_79
buf303 = empty_strided_cuda((2048, 1, 1, 1), (1, 2048, 2048, 2048),
torch.float32)
buf305 = reinterpret_tensor(buf303, (2048, 1, 1, 1), (1, 1, 1, 1), 0)
del buf303
buf306 = empty_strided_cuda((2048, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_27[grid(2048)](buf305,
primals_80, buf306, 2048, 512, num_warps=4, num_stages=1)
buf307 = extern_kernels.convolution(buf301, buf306, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf307, (4, 2048, 8, 8), (131072, 1, 16384, 2048))
buf308 = buf298
del buf298
buf309 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf311 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_28[grid(128)](buf307, buf308,
buf309, buf311, 128, 4096, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf312 = empty_strided_cuda((4, 2048, 8, 8), (131072, 1, 16384,
2048), torch.float32)
triton_poi_fused_add_native_group_norm_relu_31[grid(524288)](buf279,
buf307, buf308, buf309, primals_81, primals_82, buf312, 524288,
XBLOCK=512, num_warps=8, num_stages=1)
del primals_82
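# block3/unit1: primals_83 is the strided downsample projection and
# primals_86..88 feed conv1/gn1 of body.block3.unit1, per the primals
# assignment in ResNetV2New.forward below.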
buf314 = reinterpret_tensor(buf34, (4096, 1, 1, 1), (1, 4096, 4096,
4096), 0)
del buf34
buf316 = reinterpret_tensor(buf314, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf314
buf317 = empty_strided_cuda((4096, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_32[grid(4096)](buf316,
primals_83, buf317, 4096, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf318 = extern_kernels.convolution(buf312, buf317, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf318, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf319 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
buf320 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
buf322 = empty_strided_cuda((4, 4096, 1, 1), (4096, 1, 16384, 16384
), torch.float32)
triton_per_fused_native_group_norm_33[grid(16384)](buf318, buf319,
buf320, buf322, 16384, 16, XBLOCK=32, num_warps=4, num_stages=1)
buf324 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf326 = reinterpret_tensor(buf324, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf324
buf327 = empty_strided_cuda((1024, 2048, 1, 1), (2048, 1, 2048,
2048), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_34[grid(1024)](buf326,
primals_86, buf327, 1024, 2048, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf328 = extern_kernels.convolution(buf312, buf327, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf328, (4, 1024, 8, 8), (65536, 1, 8192, 1024))
buf329 = buf309
del buf309
buf330 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf332 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_35[grid(128)](buf328, buf329,
buf330, buf332, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf333 = empty_strided_cuda((4, 1024, 8, 8), (65536, 1, 8192, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_36[grid(262144)](buf328,
buf329, buf330, primals_87, primals_88, buf333, 262144, XBLOCK=
512, num_warps=8, num_stages=1)
del primals_88
buf335 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf337 = reinterpret_tensor(buf335, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf335
buf338 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf337,
buf10, buf338, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf339 = extern_kernels.convolution(buf333, buf338, stride=(2, 2),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf339, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf340 = buf330
del buf330
buf341 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf343 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf339, buf340,
buf341, buf343, 128, 512, num_warps=4, num_stages=1)
buf344 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf339,
buf340, buf341, primals_90, primals_91, buf344, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_91
buf346 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf348 = reinterpret_tensor(buf346, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf346
buf349 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf348,
primals_92, buf349, 4096, 1024, num_warps=8, num_stages=1)
buf350 = extern_kernels.convolution(buf344, buf349, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf350, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf351 = buf341
del buf341
buf352 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf354 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf350, buf351,
buf352, buf354, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf355 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
buf356 = buf355
del buf355
triton_poi_fused_add_native_group_norm_relu_42[grid(262144)](buf356,
buf318, buf319, buf320, primals_84, primals_85, buf350, buf351,
buf352, primals_93, primals_94, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del buf320
del primals_85
del primals_94
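# block3/unit2: primals_95..97 feed conv1/gn1 of body.block3.unit2.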
buf358 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf360 = reinterpret_tensor(buf358, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf358
buf361 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf360,
primals_95, buf361, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf362 = extern_kernels.convolution(buf356, buf361, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf362, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf363 = buf352
del buf352
buf364 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf366 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf362, buf363,
buf364, buf366, 128, 512, num_warps=4, num_stages=1)
buf367 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf362,
buf363, buf364, primals_96, primals_97, buf367, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_97
buf369 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf371 = reinterpret_tensor(buf369, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf369
buf372 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf371,
buf11, buf372, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf373 = extern_kernels.convolution(buf367, buf372, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf373, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf374 = buf364
del buf364
buf375 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf377 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf373, buf374,
buf375, buf377, 128, 512, num_warps=4, num_stages=1)
buf378 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf373,
buf374, buf375, primals_99, primals_100, buf378, 65536, XBLOCK=
512, num_warps=4, num_stages=1)
del primals_100
buf380 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf382 = reinterpret_tensor(buf380, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf380
buf383 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf382,
primals_101, buf383, 4096, 1024, num_warps=8, num_stages=1)
buf384 = extern_kernels.convolution(buf378, buf383, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf384, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf385 = buf375
del buf375
buf386 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf388 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf384, buf385,
buf386, buf388, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf389 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf356,
buf384, buf385, buf386, primals_102, primals_103, buf389,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_103
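# block3/unit3: primals_104..106 feed conv1/gn1 of body.block3.unit3.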
buf391 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf393 = reinterpret_tensor(buf391, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf391
buf394 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf393,
primals_104, buf394, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf395 = extern_kernels.convolution(buf389, buf394, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf395, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf396 = buf386
del buf386
buf397 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf399 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf395, buf396,
buf397, buf399, 128, 512, num_warps=4, num_stages=1)
buf400 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf395,
buf396, buf397, primals_105, primals_106, buf400, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_106
buf402 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf404 = reinterpret_tensor(buf402, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf402
buf405 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf404,
buf12, buf405, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf406 = extern_kernels.convolution(buf400, buf405, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf406, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf407 = buf397
del buf397
buf408 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf410 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf406, buf407,
buf408, buf410, 128, 512, num_warps=4, num_stages=1)
buf411 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf406,
buf407, buf408, primals_108, primals_109, buf411, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_109
buf413 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf415 = reinterpret_tensor(buf413, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf413
buf416 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf415,
primals_110, buf416, 4096, 1024, num_warps=8, num_stages=1)
buf417 = extern_kernels.convolution(buf411, buf416, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf417, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf418 = buf408
del buf408
buf419 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf421 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf417, buf418,
buf419, buf421, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf422 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.float32)
triton_poi_fused_add_native_group_norm_relu_44[grid(262144)](buf389,
buf417, buf418, buf419, primals_111, primals_112, buf422,
262144, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_112
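# block3/unit4: primals_113..115 feed conv1/gn1 of body.block3.unit4.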
buf424 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf426 = reinterpret_tensor(buf424, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf424
buf427 = empty_strided_cuda((1024, 4096, 1, 1), (4096, 1, 4096,
4096), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_43[grid(1024)](buf426,
primals_113, buf427, 1024, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
buf428 = extern_kernels.convolution(buf422, buf427, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf428, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf429 = buf419
del buf419
buf430 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf432 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf428, buf429,
buf430, buf432, 128, 512, num_warps=4, num_stages=1)
buf433 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf428,
buf429, buf430, primals_114, primals_115, buf433, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_115
buf435 = empty_strided_cuda((1024, 1, 1, 1), (1, 1024, 1024, 1024),
torch.float32)
buf437 = reinterpret_tensor(buf435, (1024, 1, 1, 1), (1, 1, 1, 1), 0)
del buf435
buf438 = empty_strided_cuda((1024, 1024, 3, 3), (9216, 1, 3072,
1024), torch.float32)
triton_red_fused_add_div_sqrt_sub_var_mean_37[grid(1024)](buf437,
buf13, buf438, 1024, 9216, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf439 = extern_kernels.convolution(buf433, buf438, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf439, (4, 1024, 4, 4), (16384, 1, 4096, 1024))
buf440 = buf430
del buf430
buf441 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf443 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_per_fused_native_group_norm_38[grid(128)](buf439, buf440,
buf441, buf443, 128, 512, num_warps=4, num_stages=1)
buf444 = empty_strided_cuda((4, 1024, 4, 4), (16384, 1, 4096, 1024),
torch.float32)
triton_poi_fused_native_group_norm_relu_39[grid(65536)](buf439,
buf440, buf441, primals_117, primals_118, buf444, 65536, XBLOCK
=512, num_warps=4, num_stages=1)
del primals_118
buf446 = empty_strided_cuda((4096, 1, 1, 1), (1, 4096, 4096, 4096),
torch.float32)
buf448 = reinterpret_tensor(buf446, (4096, 1, 1, 1), (1, 1, 1, 1), 0)
del buf446
buf449 = empty_strided_cuda((4096, 1024, 1, 1), (1024, 1, 1024,
1024), torch.float32)
triton_per_fused_add_div_sqrt_sub_var_mean_40[grid(4096)](buf448,
primals_119, buf449, 4096, 1024, num_warps=8, num_stages=1)
buf450 = extern_kernels.convolution(buf444, buf449, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf450, (4, 4096, 4, 4), (65536, 1, 16384, 4096))
buf451 = buf441
del buf441
buf452 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
buf454 = empty_strided_cuda((4, 32, 1, 1), (32, 1, 128, 128), torch
.float32)
triton_red_fused_native_group_norm_41[grid(128)](buf450, buf451,
buf452, buf454, 128, 2048, XBLOCK=1, RBLOCK=2048, num_warps=16,
num_stages=1)
buf455 = empty_strided_cuda((4, 4096, 4, 4), (65536, 16, 4, 1),
torch.float32)
triton_poi_fused_add_native_group_norm_relu_45[grid(64, 4096)](buf422,
buf450, buf451, buf452, primals_120, primals_121, buf455, 64,
4096, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del buf452
del primals_121
buf456 = empty_strided_cuda((4, 4096, 4, 4), (65536, 1, 16384, 4096
), torch.bool)
triton_poi_fused_threshold_backward_46[grid(16384, 16)](buf455,
buf456, 16384, 16, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
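# buf455 is the final block3 activation and buf456 the ReLU mask from
# threshold_backward; the remaining tensors in the tuple below appear to be
# kept for autograd reuse (an assumption, not stated in this excerpt).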
return (buf455, buf0, buf1, primals_3, primals_5, primals_6, primals_8,
primals_9, buf2, primals_12, primals_14, primals_15, primals_17,
primals_18, buf3, primals_21, primals_23, primals_24, primals_26,
primals_27, buf4, primals_30, primals_32, primals_33, primals_35,
primals_36, buf5, primals_39, primals_41, primals_42, primals_44,
primals_45, primals_47, primals_48, buf6, primals_51, primals_53,
primals_54, primals_56, primals_57, buf7, primals_60, primals_62,
primals_63, primals_65, primals_66, buf8, primals_69, primals_71,
primals_72, primals_74, primals_75, buf9, primals_78, primals_80,
primals_81, primals_83, primals_84, primals_86, primals_87, buf10,
primals_90, primals_92, primals_93, primals_95, primals_96, buf11,
primals_99, primals_101, primals_102, primals_104, primals_105,
buf12, primals_108, primals_110, primals_111, primals_113,
primals_114, buf13, primals_117, primals_119, primals_120, buf17,
buf18, buf19, reinterpret_tensor(buf20, (4, 32), (32, 1), 0),
reinterpret_tensor(buf23, (4, 32), (32, 1), 0), buf24, buf25, buf26,
buf30, buf31, buf32, reinterpret_tensor(buf33, (4, 1024), (1024, 1),
0), reinterpret_tensor(buf36, (4, 1024), (1024, 1), 0), buf40,
buf41, buf42, reinterpret_tensor(buf43, (4, 32), (32, 1), 0),
reinterpret_tensor(buf46, (4, 32), (32, 1), 0), buf47, buf51, buf52,
buf53, reinterpret_tensor(buf54, (4, 32), (32, 1), 0),
reinterpret_tensor(buf57, (4, 32), (32, 1), 0), buf58, buf62, buf63,
buf64, reinterpret_tensor(buf65, (4, 32), (32, 1), 0),
reinterpret_tensor(buf68, (4, 32), (32, 1), 0), buf70, buf74, buf75,
buf76, reinterpret_tensor(buf77, (4, 32), (32, 1), 0),
reinterpret_tensor(buf80, (4, 32), (32, 1), 0), buf81, buf85, buf86,
buf87, reinterpret_tensor(buf88, (4, 32), (32, 1), 0),
reinterpret_tensor(buf91, (4, 32), (32, 1), 0), buf92, buf96, buf97,
buf98, reinterpret_tensor(buf99, (4, 32), (32, 1), 0),
reinterpret_tensor(buf102, (4, 32), (32, 1), 0), buf103, buf107,
buf108, buf109, reinterpret_tensor(buf110, (4, 32), (32, 1), 0),
reinterpret_tensor(buf113, (4, 32), (32, 1), 0), buf114, buf118,
buf119, buf120, reinterpret_tensor(buf121, (4, 32), (32, 1), 0),
reinterpret_tensor(buf124, (4, 32), (32, 1), 0), buf125, buf129,
buf130, buf131, reinterpret_tensor(buf132, (4, 32), (32, 1), 0),
reinterpret_tensor(buf135, (4, 32), (32, 1), 0), buf136, buf140,
buf141, buf142, reinterpret_tensor(buf143, (4, 32), (32, 1), 0),
reinterpret_tensor(buf146, (4, 32), (32, 1), 0), buf147, buf151,
buf152, buf153, reinterpret_tensor(buf154, (4, 32), (32, 1), 0),
reinterpret_tensor(buf157, (4, 32), (32, 1), 0), buf158, buf162,
buf163, buf164, reinterpret_tensor(buf165, (4, 32), (32, 1), 0),
reinterpret_tensor(buf168, (4, 32), (32, 1), 0), buf169, buf173,
buf174, buf175, reinterpret_tensor(buf176, (4, 2048), (2048, 1), 0),
reinterpret_tensor(buf179, (4, 2048), (2048, 1), 0), buf183, buf184,
buf185, reinterpret_tensor(buf186, (4, 32), (32, 1), 0),
reinterpret_tensor(buf189, (4, 32), (32, 1), 0), buf190, buf194,
buf195, buf196, reinterpret_tensor(buf197, (4, 32), (32, 1), 0),
reinterpret_tensor(buf200, (4, 32), (32, 1), 0), buf201, buf205,
buf206, buf207, reinterpret_tensor(buf208, (4, 32), (32, 1), 0),
reinterpret_tensor(buf211, (4, 32), (32, 1), 0), buf213, buf217,
buf218, buf219, reinterpret_tensor(buf220, (4, 32), (32, 1), 0),
reinterpret_tensor(buf223, (4, 32), (32, 1), 0), buf224, buf228,
buf229, buf230, reinterpret_tensor(buf231, (4, 32), (32, 1), 0),
reinterpret_tensor(buf234, (4, 32), (32, 1), 0), buf235, buf239,
buf240, buf241, reinterpret_tensor(buf242, (4, 32), (32, 1), 0),
reinterpret_tensor(buf245, (4, 32), (32, 1), 0), buf246, buf250,
buf251, buf252, reinterpret_tensor(buf253, (4, 32), (32, 1), 0),
reinterpret_tensor(buf256, (4, 32), (32, 1), 0), buf257, buf261,
buf262, buf263, reinterpret_tensor(buf264, (4, 32), (32, 1), 0),
reinterpret_tensor(buf267, (4, 32), (32, 1), 0), buf268, buf272,
buf273, buf274, reinterpret_tensor(buf275, (4, 32), (32, 1), 0),
reinterpret_tensor(buf278, (4, 32), (32, 1), 0), buf279, buf283,
buf284, buf285, reinterpret_tensor(buf286, (4, 32), (32, 1), 0),
reinterpret_tensor(buf289, (4, 32), (32, 1), 0), buf290, buf294,
buf295, buf296, reinterpret_tensor(buf297, (4, 32), (32, 1), 0),
reinterpret_tensor(buf300, (4, 32), (32, 1), 0), buf301, buf305,
buf306, buf307, reinterpret_tensor(buf308, (4, 32), (32, 1), 0),
reinterpret_tensor(buf311, (4, 32), (32, 1), 0), buf312, buf316,
buf317, buf318, reinterpret_tensor(buf319, (4, 4096), (4096, 1), 0),
reinterpret_tensor(buf322, (4, 4096), (4096, 1), 0), buf326, buf327,
buf328, reinterpret_tensor(buf329, (4, 32), (32, 1), 0),
reinterpret_tensor(buf332, (4, 32), (32, 1), 0), buf333, buf337,
buf338, buf339, reinterpret_tensor(buf340, (4, 32), (32, 1), 0),
reinterpret_tensor(buf343, (4, 32), (32, 1), 0), buf344, buf348,
buf349, buf350, reinterpret_tensor(buf351, (4, 32), (32, 1), 0),
reinterpret_tensor(buf354, (4, 32), (32, 1), 0), buf356, buf360,
buf361, buf362, reinterpret_tensor(buf363, (4, 32), (32, 1), 0),
reinterpret_tensor(buf366, (4, 32), (32, 1), 0), buf367, buf371,
buf372, buf373, reinterpret_tensor(buf374, (4, 32), (32, 1), 0),
reinterpret_tensor(buf377, (4, 32), (32, 1), 0), buf378, buf382,
buf383, buf384, reinterpret_tensor(buf385, (4, 32), (32, 1), 0),
reinterpret_tensor(buf388, (4, 32), (32, 1), 0), buf389, buf393,
buf394, buf395, reinterpret_tensor(buf396, (4, 32), (32, 1), 0),
reinterpret_tensor(buf399, (4, 32), (32, 1), 0), buf400, buf404,
buf405, buf406, reinterpret_tensor(buf407, (4, 32), (32, 1), 0),
reinterpret_tensor(buf410, (4, 32), (32, 1), 0), buf411, buf415,
buf416, buf417, reinterpret_tensor(buf418, (4, 32), (32, 1), 0),
reinterpret_tensor(buf421, (4, 32), (32, 1), 0), buf422, buf426,
buf427, buf428, reinterpret_tensor(buf429, (4, 32), (32, 1), 0),
reinterpret_tensor(buf432, (4, 32), (32, 1), 0), buf433, buf437,
buf438, buf439, reinterpret_tensor(buf440, (4, 32), (32, 1), 0),
reinterpret_tensor(buf443, (4, 32), (32, 1), 0), buf444, buf448,
buf449, buf450, reinterpret_tensor(buf451, (4, 32), (32, 1), 0),
reinterpret_tensor(buf454, (4, 32), (32, 1), 0), buf456)
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
bias=bias, groups=groups)
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
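# Minimal usage sketch for np2th (illustrative only; the shapes below are
# hypothetical, not taken from a checkpoint): a TF/JAX-style HWIO conv kernel
# becomes a PyTorch OIHW weight tensor.
def _np2th_example():
    import numpy as np
    hwio = np.zeros((3, 3, 64, 256), dtype=np.float32)  # (H, W, in, out)
    oihw = np2th(hwio, conv=True)
    assert tuple(oihw.shape) == (256, 64, 3, 3)  # (out, in, H, W)
    return oihw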
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding,
    self.dilation, self.groups)
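# Reference sketch for StdConv2d (illustrative shapes; assumes the torch / F
# imports at the top of this file). The weight is standardized per output
# filter before the convolution, which is what the fused
# add_div_sqrt_sub_var_mean kernels in the wrapper above compute for each
# conv weight.
def _stdconv2d_example():
    conv = StdConv2d(3, 8, kernel_size=3, padding=1, bias=False)
    x = torch.randn(2, 3, 16, 16)
    w = conv.weight
    v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
    w_std = (w - m) / torch.sqrt(v + 1e-05)
    ref = F.conv2d(x, w_std, None, conv.stride, conv.padding, conv.dilation,
        conv.groups)
    assert torch.allclose(conv(x), ref)
    return w_std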
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
def load_from(self, weights, n_block, n_unit):
conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')
], conv=True)
conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')
], conv=True)
conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')
], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
'conv_proj/kernel')], conv=True)
proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/scale')])
proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
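# Shape sanity check for PreActBottleneck (illustrative sizes). The first unit
# of a stage widens and strides, so cin != cout takes the downsample + gn_proj
# branch in forward(). Note that load_from above relies on a `pjoin` helper
# (os.path.join) that is not defined in this excerpt.
def _preact_bottleneck_example():
    block = PreActBottleneck(cin=256, cout=512, cmid=128, stride=2)
    x = torch.randn(2, 256, 16, 16)
    y = block(x)
    assert tuple(y.shape) == (2, 512, 8, 8)
    return y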
class ResNetV2New(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width = width
self.root = nn.Sequential(OrderedDict([('conv', StdConv2d(3, width,
kernel_size=7, stride=2, bias=False, padding=3)), ('gn', nn.
GroupNorm(32, width, eps=1e-06)), ('relu', nn.ReLU(inplace=True
)), ('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0))]))
self.body = nn.Sequential(OrderedDict([('block1', nn.Sequential(
OrderedDict([('unit1', PreActBottleneck(cin=width, cout=width *
4, cmid=width))] + [(f'unit{i:d}', PreActBottleneck(cin=width *
4, cout=width * 4, cmid=width)) for i in range(2, block_units[0
] + 1)]))), ('block2', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 4, cout=width * 8, cmid=width * 2,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 8,
cout=width * 8, cmid=width * 2)) for i in range(2, block_units[
1] + 1)]))), ('block3', nn.Sequential(OrderedDict([('unit1',
PreActBottleneck(cin=width * 8, cout=width * 16, cmid=width * 4,
stride=2))] + [(f'unit{i:d}', PreActBottleneck(cin=width * 16,
cout=width * 16, cmid=width * 4)) for i in range(2, block_units
[2] + 1)])))]))
def forward(self, input_0):
primals_1 = self.root.conv.weight
primals_3 = self.root.gn.weight
primals_4 = self.root.gn.bias
primals_9 = self.body.block1.unit1.gn1.weight
primals_10 = self.body.block1.unit1.gn1.bias
primals_8 = self.body.block1.unit1.conv1.weight
primals_12 = self.body.block1.unit1.gn2.weight
primals_13 = self.body.block1.unit1.gn2.bias
primals_11 = self.body.block1.unit1.conv2.weight
primals_6 = self.body.block1.unit1.gn3.weight
primals_7 = self.body.block1.unit1.gn3.bias
primals_5 = self.body.block1.unit1.conv3.weight
primals_14 = self.body.block1.unit1.downsample.weight
primals_15 = self.body.block1.unit1.gn_proj.weight
primals_16 = self.body.block1.unit1.gn_proj.bias
primals_18 = self.body.block1.unit2.gn1.weight
primals_19 = self.body.block1.unit2.gn1.bias
primals_17 = self.body.block1.unit2.conv1.weight
primals_21 = self.body.block1.unit2.gn2.weight
primals_22 = self.body.block1.unit2.gn2.bias
primals_20 = self.body.block1.unit2.conv2.weight
primals_24 = self.body.block1.unit2.gn3.weight
primals_25 = self.body.block1.unit2.gn3.bias
primals_23 = self.body.block1.unit2.conv3.weight
primals_27 = self.body.block1.unit3.gn1.weight
primals_28 = self.body.block1.unit3.gn1.bias
primals_26 = self.body.block1.unit3.conv1.weight
primals_30 = self.body.block1.unit3.gn2.weight
primals_31 = self.body.block1.unit3.gn2.bias
primals_29 = self.body.block1.unit3.conv2.weight
primals_33 = self.body.block1.unit3.gn3.weight
primals_34 = self.body.block1.unit3.gn3.bias
primals_32 = self.body.block1.unit3.conv3.weight
primals_36 = self.body.block1.unit4.gn1.weight
primals_37 = self.body.block1.unit4.gn1.bias
primals_35 = self.body.block1.unit4.conv1.weight
primals_39 = self.body.block1.unit4.gn2.weight
primals_40 = self.body.block1.unit4.gn2.bias
primals_38 = self.body.block1.unit4.conv2.weight
primals_42 = self.body.block1.unit4.gn3.weight
primals_43 = self.body.block1.unit4.gn3.bias
primals_41 = self.body.block1.unit4.conv3.weight
primals_48 = self.body.block2.unit1.gn1.weight
primals_49 = self.body.block2.unit1.gn1.bias
primals_47 = self.body.block2.unit1.conv1.weight
primals_51 = self.body.block2.unit1.gn2.weight
primals_52 = self.body.block2.unit1.gn2.bias
primals_50 = self.body.block2.unit1.conv2.weight
primals_45 = self.body.block2.unit1.gn3.weight
primals_46 = self.body.block2.unit1.gn3.bias
primals_53 = self.body.block2.unit1.conv3.weight
primals_44 = self.body.block2.unit1.downsample.weight
primals_54 = self.body.block2.unit1.gn_proj.weight
primals_55 = self.body.block2.unit1.gn_proj.bias
primals_57 = self.body.block2.unit2.gn1.weight
primals_58 = self.body.block2.unit2.gn1.bias
primals_56 = self.body.block2.unit2.conv1.weight
primals_60 = self.body.block2.unit2.gn2.weight
primals_61 = self.body.block2.unit2.gn2.bias
primals_59 = self.body.block2.unit2.conv2.weight
primals_63 = self.body.block2.unit2.gn3.weight
primals_64 = self.body.block2.unit2.gn3.bias
primals_62 = self.body.block2.unit2.conv3.weight
primals_66 = self.body.block2.unit3.gn1.weight
primals_67 = self.body.block2.unit3.gn1.bias
primals_65 = self.body.block2.unit3.conv1.weight
primals_69 = self.body.block2.unit3.gn2.weight
primals_70 = self.body.block2.unit3.gn2.bias
primals_68 = self.body.block2.unit3.conv2.weight
primals_72 = self.body.block2.unit3.gn3.weight
primals_73 = self.body.block2.unit3.gn3.bias
primals_71 = self.body.block2.unit3.conv3.weight
primals_75 = self.body.block2.unit4.gn1.weight
primals_76 = self.body.block2.unit4.gn1.bias
primals_74 = self.body.block2.unit4.conv1.weight
primals_78 = self.body.block2.unit4.gn2.weight
primals_79 = self.body.block2.unit4.gn2.bias
primals_77 = self.body.block2.unit4.conv2.weight
primals_81 = self.body.block2.unit4.gn3.weight
primals_82 = self.body.block2.unit4.gn3.bias
primals_80 = self.body.block2.unit4.conv3.weight
primals_87 = self.body.block3.unit1.gn1.weight
primals_88 = self.body.block3.unit1.gn1.bias
primals_86 = self.body.block3.unit1.conv1.weight
primals_90 = self.body.block3.unit1.gn2.weight
primals_91 = self.body.block3.unit1.gn2.bias
primals_89 = self.body.block3.unit1.conv2.weight
primals_84 = self.body.block3.unit1.gn3.weight
primals_85 = self.body.block3.unit1.gn3.bias
primals_92 = self.body.block3.unit1.conv3.weight
primals_83 = self.body.block3.unit1.downsample.weight
primals_93 = self.body.block3.unit1.gn_proj.weight
primals_94 = self.body.block3.unit1.gn_proj.bias
primals_96 = self.body.block3.unit2.gn1.weight
primals_97 = self.body.block3.unit2.gn1.bias
primals_95 = self.body.block3.unit2.conv1.weight
primals_99 = self.body.block3.unit2.gn2.weight
primals_100 = self.body.block3.unit2.gn2.bias
primals_98 = self.body.block3.unit2.conv2.weight
primals_102 = self.body.block3.unit2.gn3.weight
primals_103 = self.body.block3.unit2.gn3.bias
primals_101 = self.body.block3.unit2.conv3.weight
primals_105 = self.body.block3.unit3.gn1.weight
primals_106 = self.body.block3.unit3.gn1.bias
primals_104 = self.body.block3.unit3.conv1.weight
primals_108 = self.body.block3.unit3.gn2.weight
primals_109 = self.body.block3.unit3.gn2.bias
primals_107 = self.body.block3.unit3.conv2.weight
primals_111 = self.body.block3.unit3.gn3.weight
primals_112 = self.body.block3.unit3.gn3.bias
primals_110 = self.body.block3.unit3.conv3.weight
primals_114 = self.body.block3.unit4.gn1.weight
primals_115 = self.body.block3.unit4.gn1.bias
primals_113 = self.body.block3.unit4.conv1.weight
primals_117 = self.body.block3.unit4.gn2.weight
primals_118 = self.body.block3.unit4.gn2.bias
primals_116 = self.body.block3.unit4.conv2.weight
primals_120 = self.body.block3.unit4.gn3.weight
primals_121 = self.body.block3.unit4.gn3.bias
primals_119 = self.body.block3.unit4.conv3.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63, primals_64,
primals_65, primals_66, primals_67, primals_68, primals_69,
primals_70, primals_71, primals_72, primals_73, primals_74,
primals_75, primals_76, primals_77, primals_78, primals_79,
primals_80, primals_81, primals_82, primals_83, primals_84,
primals_85, primals_86, primals_87, primals_88, primals_89,
primals_90, primals_91, primals_92, primals_93, primals_94,
primals_95, primals_96, primals_97, primals_98, primals_99,
primals_100, primals_101, primals_102, primals_103, primals_104,
primals_105, primals_106, primals_107, primals_108, primals_109,
primals_110, primals_111, primals_112, primals_113, primals_114,
primals_115, primals_116, primals_117, primals_118, primals_119,
primals_120, primals_121])
return output[0]
| HodaEb/ViT-pytorch | ResNetV2 | false | 3,009 | [
"MIT"
] | 0 | 2643740b1d846ae666635bb0f5a71bceba208675 | https://github.com/HodaEb/ViT-pytorch/tree/2643740b1d846ae666635bb0f5a71bceba208675 | import torch
import torch.nn as nn
from collections import OrderedDict
import torch.nn.functional as F
def conv1x1(cin, cout, stride=1, bias=False):
return StdConv2d(cin, cout, kernel_size=1, stride=stride, padding=0,
bias=bias)
def conv3x3(cin, cout, stride=1, groups=1, bias=False):
return StdConv2d(cin, cout, kernel_size=3, stride=stride, padding=1,
bias=bias, groups=groups)
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
class StdConv2d(nn.Conv2d):
def forward(self, x):
w = self.weight
v, m = torch.var_mean(w, dim=[1, 2, 3], keepdim=True, unbiased=False)
w = (w - m) / torch.sqrt(v + 1e-05)
return F.conv2d(x, w, self.bias, self.stride, self.padding,
    self.dilation, self.groups)
class PreActBottleneck(nn.Module):
"""Pre-activation (v2) bottleneck block.
"""
def __init__(self, cin, cout=None, cmid=None, stride=1):
super().__init__()
cout = cout or cin
cmid = cmid or cout // 4
self.gn1 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv1 = conv1x1(cin, cmid, bias=False)
self.gn2 = nn.GroupNorm(32, cmid, eps=1e-06)
self.conv2 = conv3x3(cmid, cmid, stride, bias=False)
self.gn3 = nn.GroupNorm(32, cout, eps=1e-06)
self.conv3 = conv1x1(cmid, cout, bias=False)
self.relu = nn.ReLU(inplace=True)
if stride != 1 or cin != cout:
self.downsample = conv1x1(cin, cout, stride, bias=False)
self.gn_proj = nn.GroupNorm(cout, cout)
def forward(self, x):
residual = x
if hasattr(self, 'downsample'):
residual = self.downsample(x)
residual = self.gn_proj(residual)
y = self.relu(self.gn1(self.conv1(x)))
y = self.relu(self.gn2(self.conv2(y)))
y = self.gn3(self.conv3(y))
y = self.relu(residual + y)
return y
def load_from(self, weights, n_block, n_unit):
conv1_weight = np2th(weights[pjoin(n_block, n_unit, 'conv1/kernel')
], conv=True)
conv2_weight = np2th(weights[pjoin(n_block, n_unit, 'conv2/kernel')
], conv=True)
conv3_weight = np2th(weights[pjoin(n_block, n_unit, 'conv3/kernel')
], conv=True)
gn1_weight = np2th(weights[pjoin(n_block, n_unit, 'gn1/scale')])
gn1_bias = np2th(weights[pjoin(n_block, n_unit, 'gn1/bias')])
gn2_weight = np2th(weights[pjoin(n_block, n_unit, 'gn2/scale')])
gn2_bias = np2th(weights[pjoin(n_block, n_unit, 'gn2/bias')])
gn3_weight = np2th(weights[pjoin(n_block, n_unit, 'gn3/scale')])
gn3_bias = np2th(weights[pjoin(n_block, n_unit, 'gn3/bias')])
self.conv1.weight.copy_(conv1_weight)
self.conv2.weight.copy_(conv2_weight)
self.conv3.weight.copy_(conv3_weight)
self.gn1.weight.copy_(gn1_weight.view(-1))
self.gn1.bias.copy_(gn1_bias.view(-1))
self.gn2.weight.copy_(gn2_weight.view(-1))
self.gn2.bias.copy_(gn2_bias.view(-1))
self.gn3.weight.copy_(gn3_weight.view(-1))
self.gn3.bias.copy_(gn3_bias.view(-1))
if hasattr(self, 'downsample'):
proj_conv_weight = np2th(weights[pjoin(n_block, n_unit,
'conv_proj/kernel')], conv=True)
proj_gn_weight = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/scale')])
proj_gn_bias = np2th(weights[pjoin(n_block, n_unit,
'gn_proj/bias')])
self.downsample.weight.copy_(proj_conv_weight)
self.gn_proj.weight.copy_(proj_gn_weight.view(-1))
self.gn_proj.bias.copy_(proj_gn_bias.view(-1))
class Model(nn.Module):
"""Implementation of Pre-activation (v2) ResNet mode."""
def __init__(self, block_units, width_factor):
super().__init__()
width = int(64 * width_factor)
self.width
# ... truncated (>4000 chars) for memory efficiency |
TransformerLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5t/c5txhhxqwgxdtwmgseoxqhkhd47b4q5u6chxi7shnrpkkyfwlubq.py
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
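# Rough PyTorch reference for triton_poi_fused_clone_0 (the (16, 4, 4) view is
# an assumption for illustration only). The kernel adds the per-feature bias
# in_ptr1 and writes the transpose of the last two dims contiguously, the
# layout consumed by the subsequent matmul per the source-node mapping above.
def _clone_0_reference(x, bias):
    x = x.reshape(16, 4, 4)  # kernel index order (y1, x2, y0)
    return (x + bias).transpose(1, 2).contiguous()  # stored as (y1, y0, x2)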
# kernel path: runs/run_shard_7/inductor_cache/2j/c2jp3ao3qc7s2piwrwrsa4nfhuwr2y6p5vmtnr3avjozont334gm.py
# Topologically Sorted Source Nodes: [scores, max_1], Original ATen: [aten.div, aten.max]
# Source node to ATen node mapping:
# max_1 => max_1
# scores => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %max_1 : [num_users=2] = call_function[target=torch.ops.aten.max.default](args = (%div,), kwargs = {})
triton_per_fused_div_max_1 = async_compile.triton('triton_per_fused_div_max_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_max_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_max_1(in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp5, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sh/cshxyr7ubygdgkvu2jcbl36m76i56v5sozjjpqddp4enliefpxwc.py
# Topologically Sorted Source Nodes: [scores, sub, scores_1], Original ATen: [aten.div, aten.sub, aten._softmax]
# Source node to ATen node mapping:
# scores => div
# scores_1 => amax, exp, sub_1, sum_1
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
triton_poi_fused__softmax_div_sub_2 = async_compile.triton('triton_poi_fused__softmax_div_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp7 - tmp4
tmp9 = triton_helpers.maximum(tmp5, tmp8)
tmp11 = tmp10 * tmp1
tmp12 = tmp11 - tmp4
tmp13 = triton_helpers.maximum(tmp9, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp15 - tmp4
tmp17 = triton_helpers.maximum(tmp13, tmp16)
tmp18 = tmp5 - tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp8 - tmp17
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp17
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp17
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tl.store(out_ptr0 + (x0), tmp17, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
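# The kernel above produces the per-row softmax statistics of the stabilized scores:
# out_ptr0 receives the row-wise maximum and out_ptr1 the row-wise sum of exponentials.
# A rough eager sketch of the same step (not the exact Inductor lowering):
#   shifted = scores - scores.max()                      # global shift from the previous reduction
#   row_max = shifted.max(dim=-1, keepdim=True).values
#   row_sum = (shifted - row_max).exp().sum(dim=-1, keepdim=True)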
# kernel path: runs/run_shard_7/inductor_cache/ab/cabhniu4tyer6exl4prbgpjbgmqgwyhnmgevuz4airuaxunzvkui.py
# Topologically Sorted Source Nodes: [scores, sub, scores_1, isnan, scores_2], Original ATen: [aten.div, aten.sub, aten._softmax, aten.isnan, aten.masked_fill]
# Source node to ATen node mapping:
# isnan => isnan
# scores => div
# scores_1 => amax, div_1, exp, sub_1, sum_1
# scores_2 => full_default, where
# sub => sub
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_11, 1.0), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %max_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %isnan : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%div_1,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%isnan, %full_default, %div_1), kwargs = {})
triton_poi_fused__softmax_div_isnan_masked_fill_sub_3 = async_compile.triton('triton_poi_fused__softmax_div_isnan_masked_fill_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_isnan_masked_fill_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_isnan_masked_fill_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = libdevice.isnan(tmp10).to(tl.int1)
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp10)
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
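# The kernel above finishes the softmax and the NaN masking in one elementwise pass:
# each score is normalized by its row sum and any NaN is replaced with 0.0, roughly
#   p = (shifted - row_max).exp() / row_sum
#   probs = torch.where(torch.isnan(p), torch.zeros_like(p), p)
# which mirrors `scores.masked_fill(torch.isnan(scores), 0)` in the source module.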
# kernel path: runs/run_shard_7/inductor_cache/vx/cvxrwjqa2qth3hfo6pxstqbehlnqwizdbegfslmraxs56veuysyz.py
# Topologically Sorted Source Nodes: [add, context], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# context => clone_4, var_mean
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_1), kwargs = {})
# %clone_4 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone_4, [3]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
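# The kernel above computes the LayerNorm statistics of (attention output + residual input)
# over the last d_model=4 dimension: out_ptr0 receives the mean and out_ptr1 the biased
# variance, matching `torch.var_mean(x, dim=-1, unbiased=False, keepdim=True)`.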
# kernel path: runs/run_shard_7/inductor_cache/67/c67qvoroedk6zxxdpr6igvsjiydx7bg3oz2cdth2423diom3w7dm.py
# Topologically Sorted Source Nodes: [add, context], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# add => add
# context => add_1, add_2, clone_4, mul, mul_1, rsqrt, sub_2
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_15, %primals_1), kwargs = {})
# %clone_4 : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%add,), kwargs = {memory_format: torch.contiguous_format})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone_4, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_5 = async_compile.triton('triton_poi_fused_add_native_layer_norm_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + (4*y3)), xmask & ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (y3), ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + (4*y3)), tmp13, xmask & ymask)
''', device_str='cuda')
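# The kernel above applies the affine LayerNorm transform using the statistics from the
# previous kernel, roughly `(x - mean) * rsqrt(var + 1e-5) * weight + bias` with
# layer_norm1's weight/bias (primals_8 / primals_9). No dropout op appears in this lowered
# graph, so `self.dropout1` is effectively an identity in this trace.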
# kernel path: runs/run_shard_7/inductor_cache/ym/cymjdl4jremmlezrasukduos64nkcoomx2jrrtrkxixa5iteyvhx.py
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# output_2 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_17,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_6 = async_compile.triton('triton_poi_fused_relu_threshold_backward_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
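# The kernel above fuses the bias add of linear1 with ReLU and additionally records the
# `output <= 0` mask (the threshold_backward input saved for autograd), roughly
# `h = (x @ W1.T + b1).relu()` plus `mask = h <= 0`; the matmul itself is issued earlier
# through extern_kernels.mm.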
# kernel path: runs/run_shard_7/inductor_cache/fr/cfreijp4bpdtnon3b67mlutxtoel4lotrcwh4d2owdbsiofxegzs.py
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_1 => add_3
# Graph fragment:
# %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_19, %add_2), kwargs = {})
triton_poi_fused_add_7 = async_compile.triton('triton_poi_fused_add_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
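# The kernel above adds the linear2 bias and the residual `context` tensor in place,
# i.e. roughly `out = linear2_out + b2 + context`, feeding the second LayerNorm.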
# kernel path: runs/run_shard_7/inductor_cache/pu/cpuitszhfa7qevbwvjkt4j3z6q76v5w75uixnffsk2xkzdjeklx3.py
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output_4 => add_4, rsqrt_1, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
triton_poi_fused_native_layer_norm_8 = async_compile.triton('triton_poi_fused_native_layer_norm_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
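# The kernel above mirrors the earlier statistics kernel but also folds in the epsilon and
# the reciprocal square root, storing the mean and `rsqrt(var + 1e-5)` directly.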
# kernel path: runs/run_shard_7/inductor_cache/tz/ctzpqaywjacs5zhp3wtmcjrymqua2ennrme36hv2kl5zn5qyclpt.py
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# output_4 => add_4, add_5, mul_2, mul_3, rsqrt_1, sub_3, var_mean_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %rsqrt_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %primals_14), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %primals_15), kwargs = {})
triton_poi_fused_native_layer_norm_9 = async_compile.triton('triton_poi_fused_native_layer_norm_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
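# The kernel above applies the second LayerNorm's scale and shift (primals_14 / primals_15),
# completing `self.layer_norm2(self.dropout2(output) + context)` from the source module.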
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf0, primals_3, buf3, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf1, primals_5, buf4, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [scores, max_1], Original ATen: [aten.div, aten.max]
triton_per_fused_div_max_1.run(buf5, buf6, 1, 1024, grid=grid(1), stream=stream0)
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf1 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256), torch.float32)
# Topologically Sorted Source Nodes: [scores, sub, scores_1], Original ATen: [aten.div, aten.sub, aten._softmax]
triton_poi_fused__softmax_div_sub_2.run(buf5, buf6, buf7, buf8, 256, grid=grid(256), stream=stream0)
buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores, sub, scores_1, isnan, scores_2], Original ATen: [aten.div, aten.sub, aten._softmax, aten.isnan, aten.masked_fill]
triton_poi_fused__softmax_div_isnan_masked_fill_sub_3.run(buf5, buf6, buf7, buf8, buf9, 1024, grid=grid(1024), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(buf2, primals_7, buf10, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_7
buf11 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf10, (64, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [add, context], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(buf11, primals_1, buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [add, context], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_5.run(buf11, primals_1, buf12, buf13, primals_8, primals_9, buf14, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_9
buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf15 # reuse
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_6.run(buf16, primals_11, buf22, 256, grid=grid(256), stream=stream0)
del primals_11
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0), reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [add_1], Original ATen: [aten.add]
triton_poi_fused_add_7.run(buf18, primals_13, buf14, 256, grid=grid(256), stream=stream0)
del primals_13
buf19 = buf13; del buf13 # reuse
buf20 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_8.run(buf18, buf19, buf20, 64, grid=grid(64), stream=stream0)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_9.run(buf18, buf19, buf20, primals_14, primals_15, buf21, 256, grid=grid(256), stream=stream0)
del buf19
del buf20
del primals_15
return (buf21, primals_1, primals_8, primals_14, buf5, buf6, buf11, reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor(buf16, (64, 4), (4, 1), 0), buf18, primals_12, buf22, primals_10, reinterpret_tensor(buf9, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf10, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0), )
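# A rough reading of the buffer flow in call() above (a sketch, not an exact spec):
# buf0-buf2 hold the Q/K/V projections, buf5 the raw attention scores, buf6 their global
# max, buf9 the NaN-masked softmax, buf11 the attended values, buf14 the layer_norm1
# output, buf16 the ReLU'd feed-forward hidden state, buf18 the residual sum, and buf21
# the final layer_norm2 output that is returned first.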
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It has projection layers for getting keys, queries and values, followed by attention.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
def head_split(self, x):
new_x_shape = x.size()[:-1] + (self.h, self.d_k)
return x.view(*new_x_shape).transpose(-2, -3)
def forward(self, q, k, v, mask=None):
origin_shape = q.size()
if not self.kq_same:
q = self.head_split(self.q_linear(q))
else:
q = self.head_split(self.k_linear(q))
k = self.head_split(self.k_linear(k))
v = self.head_split(self.v_linear(v))
output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)
output = output.transpose(-2, -3).reshape(origin_shape)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, d_k, mask=None):
"""
This is called by the multi-head attention object to compute the attended values.
"""
scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
if mask is not None:
scores = scores.masked_fill(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
scores = scores.masked_fill(torch.isnan(scores), 0)
output = torch.matmul(scores, v)
return output
class TransformerLayer(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
"""
This is a basic block of a Transformer. It contains one multi-head attention object,
followed by layer norm, a position-wise feedforward net and dropout layers.
"""
self.masked_attn_head = MultiHeadAttention(d_model, n_heads,
kq_same=kq_same)
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, seq, mask=None):
context = self.masked_attn_head(seq, seq, seq, mask)
context = self.layer_norm1(self.dropout1(context) + seq)
output = self.linear1(context).relu()
output = self.linear2(output)
output = self.layer_norm2(self.dropout2(output) + context)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'd_ff': 4, 'n_heads': 4, 'dropout': 0.5}]
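# A minimal usage sketch for the eager reference module above (runs on CPU or GPU):
#   layer = TransformerLayer(*get_init_inputs()[0], **get_init_inputs()[1])
#   out = layer(*get_inputs())   # expected shape: (4, 4, 4, 4)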
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
import torch.distributions
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask)
@triton.jit
def triton_per_fused_div_max_1(in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp3, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp5, None)
@triton.jit
def triton_poi_fused__softmax_div_sub_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp7 - tmp4
tmp9 = triton_helpers.maximum(tmp5, tmp8)
tmp11 = tmp10 * tmp1
tmp12 = tmp11 - tmp4
tmp13 = triton_helpers.maximum(tmp9, tmp12)
tmp15 = tmp14 * tmp1
tmp16 = tmp15 - tmp4
tmp17 = triton_helpers.maximum(tmp13, tmp16)
tmp18 = tmp5 - tmp17
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp8 - tmp17
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp19 + tmp21
tmp23 = tmp12 - tmp17
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp16 - tmp17
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tl.store(out_ptr0 + x0, tmp17, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused__softmax_div_isnan_masked_fill_sub_3(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp5 = tmp2 - tmp4
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = libdevice.isnan(tmp10).to(tl.int1)
tmp12 = 0.0
tmp13 = tl.where(tmp11, tmp12, tmp10)
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_5(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2 + 4 * y3), xmask & ymask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + y3, ymask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2 + 4 * y3), tmp13, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_6(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_7(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_8(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_9(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 4), (4, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch
.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](buf0, primals_3, buf3, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_3
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 1, 4), (64, 16, 4, 4, 1), 0)
del buf0
triton_poi_fused_clone_0[grid(64, 4)](buf1, primals_5, buf4, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0),
0), reinterpret_tensor(buf4, (64, 1, 4), (4, 0, 1), 0), out=buf5)
buf6 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_div_max_1[grid(1)](buf5, buf6, 1, 1024, num_warps=
8, num_stages=1)
buf7 = reinterpret_tensor(buf1, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf1
buf8 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 256),
torch.float32)
triton_poi_fused__softmax_div_sub_2[grid(256)](buf5, buf6, buf7,
buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
buf9 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused__softmax_div_isnan_masked_fill_sub_3[grid(1024)](buf5,
buf6, buf7, buf8, buf9, 1024, XBLOCK=128, num_warps=4, num_stages=1
)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf8
triton_poi_fused_clone_0[grid(64, 4)](buf2, primals_7, buf10, 64, 4,
XBLOCK=4, YBLOCK=64, num_warps=4, num_stages=1)
del primals_7
buf11 = reinterpret_tensor(buf2, (64, 4, 1), (4, 1, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf9, (64, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf10, (64, 4, 1), (4, 1, 0), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(64)](buf11, primals_1,
buf12, buf13, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf14 = reinterpret_tensor(buf7, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf7
triton_poi_fused_add_native_layer_norm_5[grid(64, 4)](buf11,
primals_1, buf12, buf13, primals_8, primals_9, buf14, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_9
buf15 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf14, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), out=buf15)
buf16 = reinterpret_tensor(buf15, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf15
buf22 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_6[grid(256)](buf16,
primals_11, buf22, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
buf17 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf16, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_12, (4, 4), (1, 4), 0), out=buf17)
buf18 = reinterpret_tensor(buf17, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf17
triton_poi_fused_add_7[grid(256)](buf18, primals_13, buf14, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf19 = buf13
del buf13
buf20 = buf12
del buf12
triton_poi_fused_native_layer_norm_8[grid(64)](buf18, buf19, buf20,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_native_layer_norm_9[grid(256)](buf18, buf19, buf20,
primals_14, primals_15, buf21, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del buf19
del buf20
del primals_15
return (buf21, primals_1, primals_8, primals_14, buf5, buf6, buf11,
reinterpret_tensor(buf14, (64, 4), (4, 1), 0), reinterpret_tensor(
buf16, (64, 4), (4, 1), 0), buf18, primals_12, buf22, primals_10,
reinterpret_tensor(buf9, (64, 4, 4), (16, 1, 4), 0),
reinterpret_tensor(buf10, (64, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf3, (64, 1, 4), (4, 1, 1), 0),
reinterpret_tensor(buf4, (64, 4, 1), (4, 1, 4), 0))
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It has projection layers for getting keys, queries and values, followed by attention.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
def head_split(self, x):
new_x_shape = x.size()[:-1] + (self.h, self.d_k)
return x.view(*new_x_shape).transpose(-2, -3)
def forward(self, q, k, v, mask=None):
origin_shape = q.size()
if not self.kq_same:
q = self.head_split(self.q_linear(q))
else:
q = self.head_split(self.k_linear(q))
k = self.head_split(self.k_linear(k))
v = self.head_split(self.v_linear(v))
output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)
output = output.transpose(-2, -3).reshape(origin_shape)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, d_k, mask=None):
"""
This is called by the multi-head attention object to compute the attended values.
"""
scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
if mask is not None:
scores = scores.masked_fill(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
scores = scores.masked_fill(torch.isnan(scores), 0)
output = torch.matmul(scores, v)
return output
class TransformerLayerNew(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
"""
This is a basic block of a Transformer. It contains one multi-head attention object,
followed by layer norm, a position-wise feedforward net and dropout layers.
"""
self.masked_attn_head = MultiHeadAttention(d_model, n_heads,
kq_same=kq_same)
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, input_0):
primals_2 = self.masked_attn_head.q_linear.weight
primals_3 = self.masked_attn_head.q_linear.bias
primals_4 = self.masked_attn_head.k_linear.weight
primals_5 = self.masked_attn_head.k_linear.bias
primals_6 = self.masked_attn_head.v_linear.weight
primals_7 = self.masked_attn_head.v_linear.bias
primals_8 = self.layer_norm1.weight
primals_9 = self.layer_norm1.bias
primals_10 = self.linear1.weight
primals_11 = self.linear1.bias
primals_12 = self.linear2.weight
primals_13 = self.linear2.bias
primals_14 = self.layer_norm2.weight
primals_15 = self.layer_norm2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
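# A minimal usage sketch for the compiled wrapper above (assumes a CUDA device, since
# call() allocates and asserts its buffers on 'cuda:0'):
#   layer = TransformerLayerNew(d_model=4, d_ff=4, n_heads=4, dropout=0.5).cuda()
#   out = layer(torch.rand(4, 4, 4, 4, device='cuda'))   # same shape as the input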
| Yingting-dev/ReChorus | TransformerLayer | false | 3,010 | [
"MIT"
] | 0 | a16bc1e42f3e90e889133d7476c52ada44db573b | https://github.com/Yingting-dev/ReChorus/tree/a16bc1e42f3e90e889133d7476c52ada44db573b | import torch
import numpy as np
import torch.nn as nn
import torch.distributions
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, n_heads, kq_same=False, bias=True):
super().__init__()
"""
It has projection layers for getting keys, queries and values, followed by attention.
"""
self.d_model = d_model
self.h = n_heads
self.d_k = self.d_model // self.h
self.kq_same = kq_same
if not kq_same:
self.q_linear = nn.Linear(d_model, d_model, bias=bias)
self.k_linear = nn.Linear(d_model, d_model, bias=bias)
self.v_linear = nn.Linear(d_model, d_model, bias=bias)
def head_split(self, x):
new_x_shape = x.size()[:-1] + (self.h, self.d_k)
return x.view(*new_x_shape).transpose(-2, -3)
def forward(self, q, k, v, mask=None):
origin_shape = q.size()
if not self.kq_same:
q = self.head_split(self.q_linear(q))
else:
q = self.head_split(self.k_linear(q))
k = self.head_split(self.k_linear(k))
v = self.head_split(self.v_linear(v))
output = self.scaled_dot_product_attention(q, k, v, self.d_k, mask)
output = output.transpose(-2, -3).reshape(origin_shape)
return output
@staticmethod
def scaled_dot_product_attention(q, k, v, d_k, mask=None):
"""
This is called by the multi-head attention object to compute the attended values.
"""
scores = torch.matmul(q, k.transpose(-2, -1)) / d_k ** 0.5
if mask is not None:
scores = scores.masked_fill(mask == 0, -np.inf)
scores = (scores - scores.max()).softmax(dim=-1)
scores = scores.masked_fill(torch.isnan(scores), 0)
output = torch.matmul(scores, v)
return output
class Model(nn.Module):
def __init__(self, d_model, d_ff, n_heads, dropout, kq_same=False):
super().__init__()
"""
This is a basic block of a Transformer. It contains one multi-head attention object,
followed by layer norm, a position-wise feedforward net and dropout layers.
"""
self.masked_attn_head = MultiHeadAttention(d_model, n_heads,
kq_same=kq_same)
self.layer_norm1 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.linear1 = nn.Linear(d_model, d_ff)
self.linear2 = nn.Linear(d_ff, d_model)
self.layer_norm2 = nn.LayerNorm(d_model)
self.dropout2 = nn.Dropout(dropout)
def forward(self, seq, mask=None):
context = self.masked_attn_head(seq, seq, seq, mask)
context = self.layer_norm1(self.dropout1(context) + seq)
output = self.linear1(context).relu()
output = self.linear2(output)
output = self.layer_norm2(self.dropout2(output) + context)
return output
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4, 0.5]
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cj/ccjjwnjmtkiixot4nqawxhor55ao62toktuo4vhf7fi5tdorymjj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jb/cjbldpvlgtobiyzhyyt5o2g7pxih2gtm7x7wcexp3iltxln5xn45.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => tanh
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%view, %primals_2, %primals_3, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_tanh_1 = async_compile.triton('triton_poi_fused_convolution_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256, 1024], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 256
xnumel = 784
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (784*y3)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (y0 + (64*x2) + (50176*y1)), tmp3, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4h/c4hqst7tjhd64d5yoecmsdhppjudyqln2x6gcmdnajbkvxlumwc5.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64) % 14
x2 = (xindex // 896)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (3584*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (3584*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (1792 + x0 + (128*x1) + (3584*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (1856 + x0 + (128*x1) + (3584*x2)), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zg/czgwtxzal5gsglco4zsx4ry76qnglyou5bln2kjitrg3ffw5eg4a.py
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.tanh]
# Source node to ATen node mapping:
# x_4 => convolution_1
# x_5 => tanh_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %tanh_1 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_tanh_3 = async_compile.triton('triton_poi_fused_convolution_tanh_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_tanh_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_tanh_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5i/c5icqzckqg3seqiozzpc32qxwfjarc2aaymgxf3zfmfnm6v6eur4.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_6 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%tanh_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 128], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 100
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = (yindex // 5)
y5 = yindex
y4 = (yindex // 25)
y6 = yindex % 25
tmp0 = tl.load(in_ptr0 + (x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1280 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1408 + x2 + (256*y0) + (2560*y1)), xmask & ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + (128*y5)), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + (25*x2) + (3200*y4)), tmp16, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qb/cqbtnjlu7ynxfpa4fe2k5neonrmcffdd23wmgxvevydwamurtt4e.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_9 => tanh_2
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_7), kwargs = {})
# %tanh_2 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_tanh_5 = async_compile.triton('triton_poi_fused_tanh_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qh/cqhxnfvi5dpvzrnwvgxxdso44xnpfm6gby5gq5d35sxybauqqlvj.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# x_11 => tanh_3
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %tanh_3 : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_tanh_6 = async_compile.triton('triton_poi_fused_tanh_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/gb/cgb2r2dkj3edylomyavvn6wff6um64fjrcawtmlxqkjw7pulgczd.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# x_13 => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_7 = async_compile.triton('triton_poi_fused_sigmoid_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (1024, 3200), (3200, 1))
assert_size_stride(primals_7, (1024, ), (1, ))
assert_size_stride(primals_8, (512, 1024), (1024, 1))
assert_size_stride(primals_9, (512, ), (1, ))
assert_size_stride(primals_10, (1, 512), (512, 1))
assert_size_stride(primals_11, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 28, 28), (50176, 784, 28, 1))
buf2 = empty_strided_cuda((4, 64, 28, 28), (50176, 1, 1792, 64), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_1.run(buf1, primals_3, buf2, 256, 784, grid=grid(256, 784), stream=stream0)
del buf1
del primals_3
buf3 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.float32)
buf4 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf2, buf3, buf4, 50176, grid=grid(50176), stream=stream0)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf3, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [x_4, x_5], Original ATen: [aten.convolution, aten.tanh]
triton_poi_fused_convolution_tanh_3.run(buf6, primals_5, 51200, grid=grid(51200), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128), torch.int8)
buf8 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_4.run(buf6, buf7, buf8, 100, 128, grid=grid(100, 128), stream=stream0)
buf9 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf8, (4, 3200), (3200, 1), 0), reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), out=buf9)
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.tanh]
triton_poi_fused_tanh_5.run(buf10, primals_7, 4096, grid=grid(4096), stream=stream0)
del primals_7
buf11 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (1024, 512), (1, 1024), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.tanh]
triton_poi_fused_tanh_6.run(buf12, primals_9, 2048, grid=grid(2048), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf12, reinterpret_tensor(primals_10, (512, 1), (1, 512), 0), out=buf13)
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_7.run(buf14, primals_11, 4, grid=grid(4), stream=stream0)
del primals_11
return (buf14, primals_2, buf0, primals_1, buf2, buf3, buf4, buf6, buf7, reinterpret_tensor(buf8, (4, 3200), (3200, 1), 0), buf10, buf12, buf14, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 28, 28), (784, 784, 28, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 1, 5, 5), (25, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1024, 3200), (3200, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((512, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 512), (512, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.activation1 = nn.Tanh()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2))
self.conv2 = nn.Conv2d(64, 128, kernel_size=(5, 5))
self.activation2 = nn.Tanh()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2))
self.fc1 = nn.Linear(128 * 5 * 5, 1024)
self.activation3 = nn.Tanh()
self.fc2 = nn.Linear(1024, 512)
self.activation4 = nn.Tanh()
self.fc3 = nn.Linear(512, 1)
self.activation5 = nn.Sigmoid()
def forward(self, x):
x = x.view(-1, 1, 28, 28)
x = self.conv1(x)
x = self.activation1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.activation2(x)
x = self.maxpool2(x)
x = x.view(-1, 128 * 5 * 5)
x = self.fc1(x)
x = self.activation3(x)
x = self.fc2(x)
x = self.activation4(x)
x = self.fc3(x)
x = self.activation5(x)
return x
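# Shape bookkeeping for the flatten above (illustrative, assuming the 28x28
# inputs from get_inputs() below):
#   conv1 (k=5, pad=2): 28 -> 28,  maxpool1: 28 -> 14
#   conv2 (k=5, pad=0): 14 -> 10,  maxpool2: 10 -> 5
# so the flattened feature vector has 128 * 5 * 5 = 3200 elements, matching fc1.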
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_tanh_1(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 256
xnumel = 784
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 784 * y3), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (y0 + 64 * x2 + 50176 * y1), tmp3, xmask & ymask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64 % 14
x2 = xindex // 896
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 3584 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 3584 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (1792 + x0 + 128 * x1 + 3584 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (1856 + x0 + 128 * x1 + 3584 * x2), xmask)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp16, xmask)
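# Note: the kernel above fuses 2x2 max pooling with index tracking; out_ptr0
# receives the pooled values and out_ptr1 an int8 code (0-3) recording which of
# the four window positions supplied the maximum.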
@triton.jit
def triton_poi_fused_convolution_tanh_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, out_ptr1,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 100
xnumel = 128
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 5
y1 = yindex // 5
y5 = yindex
y4 = yindex // 25
y6 = yindex % 25
tmp0 = tl.load(in_ptr0 + (x2 + 256 * y0 + 2560 * y1), xmask & ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (128 + x2 + 256 * y0 + 2560 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1280 + x2 + 256 * y0 + 2560 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (1408 + x2 + 256 * y0 + 2560 * y1), xmask &
ymask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1, 1], 1, tl.int8)
tmp4 = tl.full([1, 1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1, 1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1, 1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2 + 128 * y5), tmp15, xmask & ymask)
tl.store(out_ptr1 + (y6 + 25 * x2 + 3200 * y4), tmp16, xmask & ymask)
@triton.jit
def triton_poi_fused_tanh_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_tanh_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 512
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, None)
@triton.jit
def triton_poi_fused_sigmoid_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 28, 28), (784, 784, 28, 1))
assert_size_stride(primals_2, (64, 1, 5, 5), (25, 25, 5, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (1024, 3200), (3200, 1))
assert_size_stride(primals_7, (1024,), (1,))
assert_size_stride(primals_8, (512, 1024), (1024, 1))
assert_size_stride(primals_9, (512,), (1,))
assert_size_stride(primals_10, (1, 512), (512, 1))
assert_size_stride(primals_11, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(8192, 25)](primals_4, buf0, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
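        # buf0 holds the conv2 weight (primals_4) repacked by triton_poi_fused_0
        # into a channels-last-style stride layout for the second convolution below.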
buf1 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 28, 28), (50176, 784, 28, 1))
buf2 = empty_strided_cuda((4, 64, 28, 28), (50176, 1, 1792, 64),
torch.float32)
triton_poi_fused_convolution_tanh_1[grid(256, 784)](buf1, primals_3,
buf2, 256, 784, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf1
del primals_3
buf3 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.float32)
buf4 = empty_strided_cuda((4, 64, 14, 14), (12544, 1, 896, 64),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(50176)](buf2, buf3,
buf4, 50176, XBLOCK=256, num_warps=4, num_stages=1)
buf5 = extern_kernels.convolution(buf3, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 128, 10, 10), (12800, 1, 1280, 128))
buf6 = buf5
del buf5
triton_poi_fused_convolution_tanh_3[grid(51200)](buf6, primals_5,
51200, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 128, 5, 5), (3200, 1, 640, 128),
torch.int8)
buf8 = empty_strided_cuda((4, 128, 5, 5), (3200, 25, 5, 1), torch.
float32)
triton_poi_fused_max_pool2d_with_indices_4[grid(100, 128)](buf6,
buf7, buf8, 100, 128, XBLOCK=128, YBLOCK=2, num_warps=4,
num_stages=1)
buf9 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf8, (4, 3200), (3200, 1), 0),
reinterpret_tensor(primals_6, (3200, 1024), (1, 3200), 0), out=buf9
)
buf10 = buf9
del buf9
triton_poi_fused_tanh_5[grid(4096)](buf10, primals_7, 4096, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
buf11 = empty_strided_cuda((4, 512), (512, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (1024, 512),
(1, 1024), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_tanh_6[grid(2048)](buf12, primals_9, 2048, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf13 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.mm(buf12, reinterpret_tensor(primals_10, (512, 1), (
1, 512), 0), out=buf13)
buf14 = buf13
del buf13
triton_poi_fused_sigmoid_7[grid(4)](buf14, primals_11, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_11
return (buf14, primals_2, buf0, primals_1, buf2, buf3, buf4, buf6, buf7,
reinterpret_tensor(buf8, (4, 3200), (3200, 1), 0), buf10, buf12,
buf14, primals_10, primals_8, primals_6)
class DiscriminatorNew(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.activation1 = nn.Tanh()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2))
self.conv2 = nn.Conv2d(64, 128, kernel_size=(5, 5))
self.activation2 = nn.Tanh()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2))
self.fc1 = nn.Linear(128 * 5 * 5, 1024)
self.activation3 = nn.Tanh()
self.fc2 = nn.Linear(1024, 512)
self.activation4 = nn.Tanh()
self.fc3 = nn.Linear(512, 1)
self.activation5 = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| Ziems/pytorch-dcgan | Discriminator | false | 3,011 | [
"MIT"
] | 0 | 1a251a330b9b0df6061a10463bce8057f1230797 | https://github.com/Ziems/pytorch-dcgan/tree/1a251a330b9b0df6061a10463bce8057f1230797 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 64, kernel_size=5, padding=2)
self.activation1 = nn.Tanh()
self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2))
self.conv2 = nn.Conv2d(64, 128, kernel_size=(5, 5))
self.activation2 = nn.Tanh()
self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2))
self.fc1 = nn.Linear(128 * 5 * 5, 1024)
self.activation3 = nn.Tanh()
self.fc2 = nn.Linear(1024, 512)
self.activation4 = nn.Tanh()
self.fc3 = nn.Linear(512, 1)
self.activation5 = nn.Sigmoid()
def forward(self, x):
x = x.view(-1, 1, 28, 28)
x = self.conv1(x)
x = self.activation1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.activation2(x)
x = self.maxpool2(x)
x = x.view(-1, 128 * 5 * 5)
x = self.fc1(x)
x = self.activation3(x)
x = self.fc2(x)
x = self.activation4(x)
x = self.fc3(x)
x = self.activation5(x)
return x
def get_inputs():
return [torch.rand([4, 1, 28, 28])]
def get_init_inputs():
return []
|
SelfAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [dist_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dist_1 => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %mul_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_tensor, 0.5), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%mul_tensor_1,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [dist_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# dist_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [q], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [bmm], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dist_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [dist_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [att], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), out=buf6)
return (buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
from torch.autograd import *
class SelfAttention(nn.Module):
dim_in: 'int'
dim_k: 'int'
dim_v: 'int'
def __init__(self, dim_in, dim_k, dim_v):
super(SelfAttention, self).__init__()
self.dim_in = dim_in
self.dim_k = dim_k
self.dim_v = dim_v
self.linear_q = nn.Linear(dim_in, dim_k, bias=False)
self.linear_k = nn.Linear(dim_in, dim_k, bias=False)
self.linear_v = nn.Linear(dim_in, dim_v, bias=False)
self._norm_fact = 1 / math.sqrt(dim_k)
def forward(self, x):
_batch, _n, dim_in = x.shape
assert dim_in == self.dim_in
q = self.linear_q(x)
k = self.linear_k(x)
v = self.linear_v(x)
dist = torch.bmm(q, k.transpose(1, 2)) * self._norm_fact
dist = torch.softmax(dist, dim=-1)
att = torch.bmm(dist, v)
return att
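def _demo_self_attention():
    # Hedged usage sketch (not part of the original module), reusing the toy
    # dimensions from get_init_inputs() below. The 1/sqrt(dim_k) factor keeps the
    # pre-softmax logits at unit scale so the softmax does not saturate.
    sa = SelfAttention(dim_in=4, dim_k=4, dim_v=4)
    att = sa(torch.rand(4, 4, 4))
    return att.shape  # (batch=4, n=4, dim_v=4)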
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4, 'dim_k': 4, 'dim_v': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
import torch.nn as nn
from torch.autograd import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
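# The two kernels above split a numerically stable softmax over rows of length 4:
# the first subtracts the row max, applies the 1/sqrt(dim_k) = 0.5 scale, and
# exponentiates; the second normalizes each element by its row sum.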
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 4,
1), 0), out=buf6)
return buf6, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), buf5, reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
class SelfAttentionNew(nn.Module):
dim_in: 'int'
dim_k: 'int'
dim_v: 'int'
def __init__(self, dim_in, dim_k, dim_v):
super(SelfAttentionNew, self).__init__()
self.dim_in = dim_in
self.dim_k = dim_k
self.dim_v = dim_v
self.linear_q = nn.Linear(dim_in, dim_k, bias=False)
self.linear_k = nn.Linear(dim_in, dim_k, bias=False)
self.linear_v = nn.Linear(dim_in, dim_v, bias=False)
self._norm_fact = 1 / math.sqrt(dim_k)
def forward(self, input_0):
primals_2 = self.linear_q.weight
primals_3 = self.linear_k.weight
primals_4 = self.linear_v.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| YapingZ/News-image-caption | SelfAttention | false | 3,012 | [
"Apache-2.0"
] | 0 | fcccf51bbe5607adbf71c1da8ecdc6693555993f | https://github.com/YapingZ/News-image-caption/tree/fcccf51bbe5607adbf71c1da8ecdc6693555993f | import math
import torch
import torch.nn as nn
from torch.autograd import *
class Model(nn.Module):
dim_in: 'int'
dim_k: 'int'
dim_v: 'int'
def __init__(self, dim_in, dim_k, dim_v):
super().__init__()
self.dim_in = dim_in
self.dim_k = dim_k
self.dim_v = dim_v
self.linear_q = nn.Linear(dim_in, dim_k, bias=False)
self.linear_k = nn.Linear(dim_in, dim_k, bias=False)
self.linear_v = nn.Linear(dim_in, dim_v, bias=False)
self._norm_fact = 1 / math.sqrt(dim_k)
def forward(self, x):
_batch, _n, dim_in = x.shape
assert dim_in == self.dim_in
q = self.linear_q(x)
k = self.linear_k(x)
v = self.linear_v(x)
dist = torch.bmm(q, k.transpose(1, 2)) * self._norm_fact
dist = torch.softmax(dist, dim=-1)
att = torch.bmm(dist, v)
return att
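# Added note: the forward pass above is plain single-head scaled dot-product self-attention,
#     att = softmax(Q K^T * _norm_fact) V,   _norm_fact = 1 / sqrt(dim_k),
# with Q, K, V obtained from the same input x by the three bias-free linear projections.
# Shapes: x is (batch, n, dim_in) and att is (batch, n, dim_v).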
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
BasicBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/yw/cywcz4pxnzyvlsoydzxcj5pzlu3i5g7qgj7guhgyvlrzkngzehmv.py
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# out_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/62/c62vdyzlu3lvskzid3jo7oiwnwhbmrkav2u5qcx2zjpp72hnxkny.py
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_3 => add
# out_4 => relu_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_add_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_add_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3, out_4], Original ATen: [aten.add, aten.relu, aten.threshold_backward]
triton_poi_fused_add_relu_threshold_backward_1.run(buf3, primals_1, buf4, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, primals_3, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
import torch.nn as nn
def conv3x3(in_planes, out_planes, dilation=1, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None
):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, dilation, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
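# Added note: this is the classic ResNet BasicBlock (here without batch norm):
#     out = relu(conv2(relu(conv1(x))) + residual),
# where residual is x itself unless a `downsample` module is provided to match shapes.
# Since conv3x3 uses padding = dilation, the spatial size is preserved whenever stride == 1.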
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(in_out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x0, tmp4, xmask)
tl.store(out_ptr0 + x0, tmp6, xmask)
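# Added note: this kernel fuses the residual add with the block's final ReLU,
#     tmp4 = max(0, conv_out + residual),
# written back in place through in_out_ptr0. It also stores the boolean mask (tmp4 <= 0)
# into out_ptr0; that mask is what autograd later uses (via aten.threshold_backward) to zero
# the gradient wherever the ReLU clamped the activation.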
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps
=4, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_relu_threshold_backward_1[grid(256)](buf3,
primals_1, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, primals_3, buf1, buf4
def conv3x3(in_planes, out_planes, dilation=1, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
class BasicBlockNew(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None
):
super(BasicBlockNew, self).__init__()
self.conv1 = conv3x3(inplanes, planes, dilation, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv2.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ZurMaD/DAIN | BasicBlock | false | 3,013 | [
"MIT"
] | 0 | 22570e51e84f7dfd48ba4f88e6ee7c9ff1b0b123 | https://github.com/ZurMaD/DAIN/tree/22570e51e84f7dfd48ba4f88e6ee7c9ff1b0b123 | import torch
import torch.utils.data
import torch.nn as nn
def conv3x3(in_planes, out_planes, dilation=1, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=int(dilation * (3 - 1) / 2), dilation=dilation, bias=False)
class Model(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, dilation=1, stride=1, downsample=None
):
super().__init__()
self.conv1 = conv3x3(inplanes, planes, dilation, stride)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
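# Hedged usage sketch (added for illustration; it only reuses the helpers defined above).
if __name__ == "__main__":
    model = Model(*get_init_inputs())  # inplanes=4, planes=4, identity shortcut
    x, = get_inputs()                  # a random (4, 4, 4, 4) activation
    y = model(x)
    print(y.shape)                     # torch.Size([4, 4, 4, 4]); the block preserves shape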
|
SimpleNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ik/cik7wcr7jeulllkep2ikud45lglbwxhslzt6ushqu56keaxr5qll.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_1 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/c7/cc7y3tshmudbxwh2fqbrg5bejvvqhe42yql3rh4vp4j6mv5ggzhp.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_2 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 12288), (12288, 1))
assert_size_stride(primals_2, (84, 12288), (12288, 1))
assert_size_stride(primals_3, (84, ), (1, ))
assert_size_stride(primals_4, (50, 84), (84, 1))
assert_size_stride(primals_5, (50, ), (1, ))
assert_size_stride(primals_6, (2, 50), (50, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (12288, 84), (1, 12288), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 336, grid=grid(336), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (84, 50), (1, 84), 0), out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu]
triton_poi_fused_relu_1.run(buf3, primals_5, 200, grid=grid(200), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6, (50, 2), (1, 50), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (buf4, primals_1, buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 12288), (12288, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((84, 12288), (12288, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((50, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
class SimpleNet(nn.Module):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc1 = nn.Linear(12288, 84)
self.fc2 = nn.Linear(84, 50)
self.fc3 = nn.Linear(50, 2)
def forward(self, x):
x = x.view(-1, 12288)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
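# Added note: SimpleNet is a plain three-layer MLP classifier, 12288 -> 84 -> 50 -> 2, with
# ReLU between the linear layers and raw logits at the output. The hard-coded width 12288
# equals 3 * 64 * 64, so a flattened 3x64x64 RGB image is the likely intended input; that
# interpretation is an assumption, not something stated in this file.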
def get_inputs():
return [torch.rand([4, 12288])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 336
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 84
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 12288), (12288, 1))
assert_size_stride(primals_2, (84, 12288), (12288, 1))
assert_size_stride(primals_3, (84,), (1,))
assert_size_stride(primals_4, (50, 84), (84, 1))
assert_size_stride(primals_5, (50,), (1,))
assert_size_stride(primals_6, (2, 50), (50, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (12288,
84), (1, 12288), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(336)](buf1, primals_3, 336, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_4, (84, 50), (1,
84), 0), out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_relu_1[grid(200)](buf3, primals_5, 200, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, buf3, reinterpret_tensor(primals_6,
(50, 2), (1, 50), 0), alpha=1, beta=1, out=buf4)
del primals_7
return buf4, primals_1, buf1, buf3, primals_6, primals_4
class SimpleNetNew(nn.Module):
def __init__(self):
super(SimpleNetNew, self).__init__()
self.fc1 = nn.Linear(12288, 84)
self.fc2 = nn.Linear(84, 50)
self.fc3 = nn.Linear(50, 2)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| aakgun/pytorch-VideoDataset | SimpleNet | false | 3,014 | [
"MIT"
] | 0 | 619e385f37b99bfabca0b814673825ed902242b2 | https://github.com/aakgun/pytorch-VideoDataset/tree/619e385f37b99bfabca0b814673825ed902242b2 | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(12288, 84)
self.fc2 = nn.Linear(84, 50)
self.fc3 = nn.Linear(50, 2)
def forward(self, x):
x = x.view(-1, 12288)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 12288])]
def get_init_inputs():
return []
|
WineLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fa/cfa6bszpns5yhug32unm5jaaxp7nppo2yfrmvmrfxoj6vtium6fq.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.smooth_l1_loss]
# Source node to ATen node mapping:
# loss => abs_1, div, lt, mean, mul, pow_1, sub, sub_1, where
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=3] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%abs_1, 1.0), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%abs_1, 2), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 1.0), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%abs_1, 0.5), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%lt, %div, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%where,), kwargs = {})
triton_per_fused_smooth_l1_loss_0 = async_compile.triton('triton_per_fused_smooth_l1_loss_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_smooth_l1_loss_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.smooth_l1_loss]
stream0 = get_raw_stream(0)
triton_per_fused_smooth_l1_loss_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class WineLoss(nn.Module):
def __init__(self):
super(WineLoss, self).__init__()
self.smoothl1 = nn.SmoothL1Loss()
def forward(self, pred, label):
loss = self.smoothl1(pred, label)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_smooth_l1_loss_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
# The bare tl.full(...) calls below are leftover index/mask scaffolding from the generated
# kernel; their results are discarded, and only rindex is actually used.
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tl_math.abs(tmp2)
tmp4 = 1.0
tmp5 = tmp3 < tmp4
tmp6 = tmp3 * tmp3
tmp7 = 0.5
tmp8 = tmp6 * tmp7
tmp9 = tmp8 * tmp4
tmp10 = tmp3 - tmp7
tmp11 = tl.where(tmp5, tmp9, tmp10)
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
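# Added note: this single-pass reduction kernel evaluates SmoothL1Loss (Huber loss with the
# default beta = 1.0) over all 256 elements of the two (4, 4, 4, 4) inputs. With d the
# elementwise difference of the inputs (its sign never matters below),
#     l = 0.5 * d**2   if |d| < 1   else   |d| - 0.5,
# and the scalar output is the mean, sum(l) / 256.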
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_smooth_l1_loss_0[grid(1)](buf1, arg1_1, arg0_1, 1,
256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class WineLossNew(nn.Module):
def __init__(self):
super(WineLossNew, self).__init__()
self.smoothl1 = nn.SmoothL1Loss()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ZhongyuanW/foundation_wines | WineLoss | false | 3,015 | [
"Apache-2.0"
] | 0 | 92b9ae0ece46ecd291a9101ebef9e9421ead92d6 | https://github.com/ZhongyuanW/foundation_wines/tree/92b9ae0ece46ecd291a9101ebef9e9421ead92d6 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super().__init__()
self.smoothl1 = nn.SmoothL1Loss()
def forward(self, pred, label):
loss = self.smoothl1(pred, label)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
SA | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/46/c46mg7rvdztu6n5oosf5c4if7ziag6obrxhwbn43lcdfibfuom7w.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%mean, %getitem], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hy/chyq5lqryq6qebxlfhdffupuca4px672fxychf3hng5vgomdaota.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k4/ck42otglczd2qtk6fovmlu2yv7bzgywgiadyjygqbvu4m2rftbjm.py
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
triton_poi_fused_mul_sigmoid_2 = async_compile.triton('triton_poi_fused_mul_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 128, grid=grid(128), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid, mul], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_2.run(primals_1, buf2, buf3, 256, grid=grid(256), stream=stream0)
return (buf3, primals_1, primals_2, buf0, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 2, 7, 7), (98, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SA(nn.Module):
def __init__(self, kernel_size=7):
super(SA, self).__init__()
assert kernel_size in (3, 7)
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
inputs = x
avg_pool = torch.mean(x, dim=1, keepdim=True)
max_pool, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_pool, max_pool], dim=1)
x = self.conv1(x)
return inputs * self.sigmoid(x)
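# Added note: this is a CBAM-style spatial-attention gate ("CBAM-style" is my description of
# the pattern, not a claim made by this file). Channel-wise mean and max pooling give a
# 2-channel map, the 7x7 (or 3x3) conv turns it into a one-channel logit map, and each spatial
# position of the input is rescaled by its sigmoid:
#     out = x * sigmoid(conv(cat([mean_c(x), max_c(x)], dim=1)))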
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp9 = tmp7 + tmp8
tmp10 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp19 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = triton_helpers.maximum(tmp19, tmp20)
tmp22 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = triton_helpers.maximum(tmp21, tmp22)
tmp24 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp16 & xmask,
eviction_policy='evict_last', other=0.0)
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = tl.full(tmp25.shape, 0.0, tmp25.dtype)
tmp27 = tl.where(tmp16, tmp25, tmp26)
tmp28 = tl.where(tmp4, tmp15, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
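# Added note: the kernel above materialises torch.cat([avg_pool, max_pool], dim=1) in one pass.
# For output channel 0 (x1 == 0) it averages the four input channels of a spatial location;
# for output channel 1 it takes their maximum; the final tl.where selects between the two
# branches, so neither pooled tensor is ever written out separately.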
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tl.store(in_out_ptr0 + x0, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x3, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 2, 7, 7), (98, 49, 7, 1))
assert_size_stride(primals_3, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](primals_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4, 4), (16, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_2[grid(256)](primals_1, buf2, buf3,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf3, primals_1, primals_2, buf0, buf2
class SANew(nn.Module):
def __init__(self, kernel_size=7):
super(SANew, self).__init__()
assert kernel_size in (3, 7)
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ZhijieXiao-0624/CNXA | SA | false | 3,016 | [
"MIT"
] | 0 | a63b3561010cf87f696a005f8ea252e7cdaa7ca2 | https://github.com/ZhijieXiao-0624/CNXA/tree/a63b3561010cf87f696a005f8ea252e7cdaa7ca2 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
assert kernel_size in (3, 7)
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
inputs = x
avg_pool = torch.mean(x, dim=1, keepdim=True)
max_pool, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_pool, max_pool], dim=1)
x = self.conv1(x)
return inputs * self.sigmoid(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
GELU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/nh/cnhx37tsffx4r7taj3xi72s7yfpnnccem24fupfbht6b7bzliavu.py
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu], Original ATen: [aten.gelu]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class GELU(nn.Module):
def forward(self, input):
return F.gelu(input)
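# Illustrative reference (assumed equivalence, not part of the original module):
# the fused Triton kernel above evaluates exact GELU,
# 0.5 * x * (1 + erf(x / sqrt(2))), which is what F.gelu computes by default.
def _gelu_reference(x):
    return 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))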
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
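    # Elementwise exact GELU over the flattened 256-element input:
    # out = 0.5 * x * (1 + erf(x / sqrt(2))).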
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 0.7071067811865476
tmp4 = tmp0 * tmp3
tmp5 = libdevice.erf(tmp4)
tmp6 = 1.0
tmp7 = tmp5 + tmp6
tmp8 = tmp2 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_gelu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GELUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
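# Hypothetical smoke test (not part of the generated module): compare the
# compiled kernel against eager-mode GELU on a contiguous CUDA tensor matching
# the (4, 4, 4, 4) shape and (64, 16, 4, 1) strides that call() asserts.
def _check_gelu_kernel():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    out = GELUNew()(x)
    return torch.allclose(out, torch.nn.functional.gelu(x), atol=1e-6)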
| ag8/mrl | GELU | false | 3,017 | [
"MIT"
] | 0 | f05b00347f88020cbeb216c7e4764a4d2523b67e | https://github.com/ag8/mrl/tree/f05b00347f88020cbeb216c7e4764a4d2523b67e | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def forward(self, input):
return F.gelu(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
TransformerDecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py
# Topologically Sorted Source Nodes: [tgt], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py
# Topologically Sorted Source Nodes: [tgt], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ah/cahpqo3o7hv3q647n5lretlqvfljlubj4ic7gscxws4yvkm5jzff.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_2
# Graph fragment:
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ji/cjikooh3unjvssdwbmc5bbgrf7argvwkpdjikzfpajfrzpotlkhf.py
# Topologically Sorted Source Nodes: [tgt_1, tgt_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt_1 => add_2
# tgt_2 => var_mean_1
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/j4/cj4vucbv6vxdldbfg73k3ixw2brnd6f754oxugjq3s7syrcrb4qe.py
# Topologically Sorted Source Nodes: [tgt_1, tgt_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# tgt_1 => add_2
# tgt_2 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7u/c7up5iv73h2zidaszkmsx5lcplapb2eule6rqadg4siq5zdaoo5m.py
# Topologically Sorted Source Nodes: [tgt_1, tgt_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_1 => add_2
# tgt_3 => add_5
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_5 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %squeeze_2), kwargs = {})
triton_poi_fused_add_8 = async_compile.triton('triton_poi_fused_add_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/72/c72dbqgy5xkloy2rcp3rynir3hrgprnoecgmcynn3qr6522vq72l.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_18), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_9 = async_compile.triton('triton_poi_fused_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/4e/c4escp7v3hikmzkxuyjmzdj3juzy4ty3yheqrkztwu2gvsteirte.py
# Topologically Sorted Source Nodes: [tgt_5], Original ATen: [aten.add]
# Source node to ATen node mapping:
# tgt_5 => add_8
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_20), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_tensor), kwargs = {})
triton_poi_fused_add_10 = async_compile.triton('triton_poi_fused_add_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (12, 4), (4, 1))
assert_size_stride(primals_12, (12, ), (1, ))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4, ), (1, ))
assert_size_stride(primals_15, (4, ), (1, ))
assert_size_stride(primals_16, (4, ), (1, ))
assert_size_stride(primals_17, (2048, 4), (4, 1))
assert_size_stride(primals_18, (2048, ), (1, ))
assert_size_stride(primals_19, (4, 2048), (2048, 1))
assert_size_stride(primals_20, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [tgt], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf10, buf11, 4, 4, grid=grid(4, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [tgt_1, tgt_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_1, tgt_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, grid=grid(16), stream=stream0)
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (4, 4), (1, 4), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_12, (4, ), (1, ), 4), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_12, (4, ), (1, ), 8), primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf18)
buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf19, primals_12, 16, grid=grid(16), stream=stream0)
del primals_12
buf20 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1, 1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf20, buf21, 64, grid=grid(64), stream=stream0)
buf22 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf21, buf22, 64, grid=grid(64), stream=stream0)
del buf21
buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1, 4, 1), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf23, buf24, 4, 4, grid=grid(4, 4), stream=stream0)
buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0), reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf25)
buf26 = buf25; del buf25 # reuse
# Topologically Sorted Source Nodes: [tgt_1, tgt_3], Original ATen: [aten.add]
triton_poi_fused_add_8.run(buf26, primals_1, buf12, primals_14, 16, grid=grid(16), stream=stream0)
del primals_14
buf27 = buf14; del buf14 # reuse
buf28 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [tgt_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_0.run(buf26, buf27, buf28, 4, grid=grid(4), stream=stream0)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt_4], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(buf26, buf27, buf28, primals_15, primals_16, buf29, 16, grid=grid(16), stream=stream0)
del buf27
del buf28
del primals_16
buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf29, reinterpret_tensor(primals_17, (4, 2048), (1, 4), 0), out=buf30)
buf31 = buf30; del buf30 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_9.run(buf31, primals_18, 8192, grid=grid(8192), stream=stream0)
del primals_18
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf31, reinterpret_tensor(primals_19, (2048, 4), (1, 2048), 0), out=buf32)
buf33 = buf32; del buf32 # reuse
# Topologically Sorted Source Nodes: [tgt_5], Original ATen: [aten.add]
triton_poi_fused_add_10.run(buf33, buf26, primals_20, 16, grid=grid(16), stream=stream0)
del primals_20
return (buf33, primals_1, primals_8, primals_15, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, primals_10, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0), buf26, buf29, buf31, primals_19, primals_17, primals_13, reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_11, (4, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
from torch import nn
def _get_activation_fn(activation: 'str'):
if activation == 'relu':
return nn.functional.relu
elif activation == 'gelu':
return nn.functional.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
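# e.g. _get_activation_fn('relu') returns nn.functional.relu; any other string
# raises the RuntimeError above.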
class TransformerDecoderLayer(nn.Module):
"""
Modified from torch.nn.TransformerDecoderLayer.
Add support of normalize_before,
i.e., use layer_norm before the first block.
Args:
d_model:
the number of expected features in the input (required).
nhead:
the number of heads in the multiheadattention models (required).
dim_feedforward:
the dimension of the feedforward network model (default=2048).
dropout:
the dropout value (default=0.1).
activation:
the activation function of intermediate layer, relu or
gelu (default=relu).
Examples::
        >>> decoder_layer = TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
"""
def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int'
=2048, dropout: 'float'=0.1, activation: 'str'='relu',
normalize_before: 'bool'=True) ->None:
super(TransformerDecoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.src_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = nn.functional.relu
super(TransformerDecoderLayer, self).__setstate__(state)
def forward(self, tgt: 'torch.Tensor', memory: 'torch.Tensor', tgt_mask:
'Optional[torch.Tensor]'=None, memory_mask:
'Optional[torch.Tensor]'=None, tgt_key_padding_mask:
'Optional[torch.Tensor]'=None, memory_key_padding_mask:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt:
the sequence to the decoder layer (required).
memory:
the sequence from the last layer of the encoder (required).
tgt_mask:
the mask for the tgt sequence (optional).
memory_mask:
the mask for the memory sequence (optional).
tgt_key_padding_mask:
the mask for the tgt keys per batch (optional).
memory_key_padding_mask:
the mask for the memory keys per batch (optional).
Shape:
tgt: (T, N, E).
memory: (S, N, E).
tgt_mask: (T, T).
memory_mask: (T, S).
tgt_key_padding_mask: (N, T).
memory_key_padding_mask: (N, S).
S is the source sequence length, T is the target sequence length,
        N is the batch size, E is the feature dimension
"""
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = residual + self.dropout1(tgt2)
if not self.normalize_before:
tgt = self.norm1(tgt)
residual = tgt
if self.normalize_before:
tgt = self.norm2(tgt)
tgt2 = self.src_attn(tgt, memory, memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = residual + self.dropout2(tgt2)
if not self.normalize_before:
tgt = self.norm2(tgt)
residual = tgt
if self.normalize_before:
tgt = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = residual + self.dropout3(tgt2)
if not self.normalize_before:
tgt = self.norm3(tgt)
return tgt
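# Minimal usage sketch (assumed toy sizes matching get_inputs below; unbatched
# 2-D inputs rely on MultiheadAttention accepting unbatched tensors, which
# newer PyTorch versions do):
def _example_decoder_usage():
    layer = TransformerDecoderLayer(d_model=4, nhead=4)
    tgt = torch.rand(4, 4)
    memory = torch.rand(4, 4)
    return layer(tgt, memory)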
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
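# The next two kernels implement a numerically stable row softmax over length-4
# rows: the first subtracts the row maximum before exponentiating, the second
# normalizes each element by the row sum.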
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_add_8(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_10(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (12, 4), (4, 1))
assert_size_stride(primals_12, (12,), (1,))
assert_size_stride(primals_13, (4, 4), (4, 1))
assert_size_stride(primals_14, (4,), (1,))
assert_size_stride(primals_15, (4,), (1,))
assert_size_stride(primals_16, (4,), (1,))
assert_size_stride(primals_17, (2048, 4), (4, 1))
assert_size_stride(primals_18, (2048,), (1,))
assert_size_stride(primals_19, (4, 2048), (2048, 1))
assert_size_stride(primals_20, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_11, (4, 4), (1,
4), 0), out=buf16)
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 4),
primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf17)
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_12, (4,), (1,), 8),
primals_10, reinterpret_tensor(primals_11, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf18)
buf19 = reinterpret_tensor(buf16, (4, 4, 1), (1, 4, 16), 0)
del buf16
triton_poi_fused_mul_2[grid(16)](buf19, primals_12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_12
buf20 = buf8
del buf8
extern_kernels.bmm(buf19, reinterpret_tensor(buf17, (4, 1, 4), (1,
1, 4), 0), out=buf20)
buf21 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf20, buf21, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf22 = buf20
del buf20
triton_poi_fused__softmax_4[grid(64)](buf21, buf22, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf21
buf23 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf22, reinterpret_tensor(buf18, (4, 4, 1), (1,
4, 1), 0), out=buf23)
buf24 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf23, buf24, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf25 = reinterpret_tensor(buf23, (4, 4), (4, 1), 0)
del buf23
extern_kernels.mm(reinterpret_tensor(buf24, (4, 4), (4, 1), 0),
reinterpret_tensor(primals_13, (4, 4), (1, 4), 0), out=buf25)
buf26 = buf25
del buf25
triton_poi_fused_add_8[grid(16)](buf26, primals_1, buf12,
primals_14, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_14
buf27 = buf14
del buf14
buf28 = buf13
del buf13
triton_poi_fused_native_layer_norm_0[grid(4)](buf26, buf27, buf28,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf29 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](buf26, buf27, buf28,
primals_15, primals_16, buf29, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf27
del buf28
del primals_16
buf30 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf29, reinterpret_tensor(primals_17, (4, 2048),
(1, 4), 0), out=buf30)
buf31 = buf30
del buf30
triton_poi_fused_relu_9[grid(8192)](buf31, primals_18, 8192, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_18
buf32 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf31, reinterpret_tensor(primals_19, (2048, 4),
(1, 2048), 0), out=buf32)
buf33 = buf32
del buf32
triton_poi_fused_add_10[grid(16)](buf33, buf26, primals_20, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_20
return (buf33, primals_1, primals_8, primals_15, buf2, buf9,
reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15,
primals_10, buf22, reinterpret_tensor(buf24, (4, 4), (4, 1), 0),
buf26, buf29, buf31, primals_19, primals_17, primals_13,
reinterpret_tensor(buf18, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf19, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf17, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_11, (4, 4), (4, 1), 0), primals_6,
reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
def _get_activation_fn(activation: 'str'):
if activation == 'relu':
return nn.functional.relu
elif activation == 'gelu':
return nn.functional.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerDecoderLayerNew(nn.Module):
"""
Modified from torch.nn.TransformerDecoderLayer.
Add support for normalize_before,
i.e., use layer_norm before the first block.
Args:
d_model:
the number of expected features in the input (required).
nhead:
the number of heads in the multiheadattention models (required).
dim_feedforward:
the dimension of the feedforward network model (default=2048).
dropout:
the dropout value (default=0.1).
activation:
the activation function of intermediate layer, relu or
gelu (default=relu).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
"""
def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int'
=2048, dropout: 'float'=0.1, activation: 'str'='relu',
normalize_before: 'bool'=True) ->None:
super(TransformerDecoderLayerNew, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.src_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = nn.functional.relu
super(TransformerDecoderLayerNew, self).__setstate__(state)
def forward(self, input_0, input_1):
primals_4 = self.self_attn.in_proj_weight
primals_5 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_2 = self.self_attn.out_proj.bias
primals_11 = self.src_attn.in_proj_weight
primals_12 = self.src_attn.in_proj_bias
primals_6 = self.src_attn.out_proj.weight
primals_3 = self.src_attn.out_proj.bias
primals_17 = self.linear1.weight
primals_18 = self.linear1.bias
primals_19 = self.linear2.weight
primals_7 = self.linear2.bias
primals_8 = self.norm1.weight
primals_9 = self.norm1.bias
primals_14 = self.norm2.weight
primals_15 = self.norm2.bias
primals_16 = self.norm3.weight
primals_20 = self.norm3.bias
primals_10 = input_0
primals_13 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20])
return output[0]
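# Usage sketch for the compiled wrapper above (comments only; the concrete
# hyper-parameters are assumptions): call() requires a CUDA device and the
# unbatched (4, 4) shapes it asserts; d_model=4 with nhead=4 is one valid
# choice, since nhead only has to divide d_model here.
#   layer = TransformerDecoderLayerNew(d_model=4, nhead=4).cuda()
#   out = layer(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
#   # out.shape == (4, 4)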
| aarora8/icefall | TransformerDecoderLayer | false | 3,018 | [
"Apache-2.0"
] | 0 | 8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | https://github.com/aarora8/icefall/tree/8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | import torch
from typing import Optional
from torch import nn
def _get_activation_fn(activation: 'str'):
if activation == 'relu':
return nn.functional.relu
elif activation == 'gelu':
return nn.functional.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class Model(nn.Module):
"""
Modified from torch.nn.TransformerDecoderLayer.
Add support for normalize_before,
i.e., use layer_norm before the first block.
Args:
d_model:
the number of expected features in the input (required).
nhead:
the number of heads in the multiheadattention models (required).
dim_feedforward:
the dimension of the feedforward network model (default=2048).
dropout:
the dropout value (default=0.1).
activation:
the activation function of intermediate layer, relu or
gelu (default=relu).
Examples::
>>> decoder_layer = nn.TransformerDecoderLayer(d_model=512, nhead=8)
>>> memory = torch.rand(10, 32, 512)
>>> tgt = torch.rand(20, 32, 512)
>>> out = decoder_layer(tgt, memory)
"""
def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int'
=2048, dropout: 'float'=0.1, activation: 'str'='relu',
normalize_before: 'bool'=True) ->None:
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.src_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = nn.functional.relu
super().__setstate__(state)
def forward(self, tgt: 'torch.Tensor', memory: 'torch.Tensor', tgt_mask:
'Optional[torch.Tensor]'=None, memory_mask:
'Optional[torch.Tensor]'=None, tgt_key_padding_mask:
'Optional[torch.Tensor]'=None, memory_key_padding_mask:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt:
the sequence to the decoder layer (required).
memory:
the sequence from the last layer of the encoder (required).
tgt_mask:
the mask for the tgt sequence (optional).
memory_mask:
the mask for the memory sequence (optional).
tgt_key_padding_mask:
the mask for the tgt keys per batch (optional).
memory_key_padding_mask:
the mask for the memory keys per batch (optional).
Shape:
tgt: (T, N, E).
memory: (S, N, E).
tgt_mask: (T, T).
memory_mask: (T, S).
tgt_key_padding_mask: (N, T).
memory_key_padding_mask: (N, S).
S is the source sequence length, T is the target sequence length,
N is the batch size, E is the feature number
"""
residual = tgt
if self.normalize_before:
tgt = self.norm1(tgt)
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = residual + self.dropout1(tgt2)
if not self.normalize_before:
tgt = self.norm1(tgt)
residual = tgt
if self.normalize_before:
tgt = self.norm2(tgt)
tgt2 = self.src_attn(tgt, memory, memory, attn_mas
# ... truncated (>4000 chars) for memory efficiency |
SE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# out => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6u/c6utrmzep5ziyaxjrlcwvhjg7wjjd7tfipeakfphf2az5qxhw3yf.py
# Topologically Sorted Source Nodes: [conv2d, sigmoid, out_1], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv2d => convolution
# out_1 => mul
# sigmoid => sigmoid
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lp/clprvnh5p6cmadxtwzizwydrpjlwxohxixbw4ntucp6srbu6gtis.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# out_2 => sigmoid_1
# out_3 => mul_1
# Graph fragment:
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid_1), kwargs = {})
triton_poi_fused_mul_sigmoid_3 = async_compile.triton('triton_poi_fused_mul_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, sigmoid, out_1], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
triton_poi_fused_convolution_mul_sigmoid_1.run(buf3, primals_3, buf4, 16, grid=grid(16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.sigmoid, aten.mul]
triton_poi_fused_mul_sigmoid_3.run(primals_1, buf6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
return x * x.sigmoid()
class SE(nn.Module):
"""Squeeze-and-Excitation block with Swish."""
def __init__(self, in_planes, se_planes):
super(SE, self).__init__()
self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)
def forward(self, x):
out = F.adaptive_avg_pool2d(x, (1, 1))
out = swish(self.se1(out))
out = self.se2(out).sigmoid()
out = x * out
return out
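# Formula view of the block above (a restatement of forward, not new behaviour):
#   SE(x) = x * sigmoid(se2(swish(se1(GAP(x)))))
# where GAP(x) = F.adaptive_avg_pool2d(x, (1, 1)) and swish(z) = z * sigmoid(z).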
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'se_planes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
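# The kernel above averages the 16 (= 4 x 4) spatial elements of each (n, c)
# pair, i.e. it is the Triton counterpart of F.adaptive_avg_pool2d(x, (1, 1))
# for the (4, 4, 4, 4) input this compiled graph handles.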
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp2, xmask)
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
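# The kernel above applies the excitation gate: x1 = xindex // 16 maps each of
# the 256 output elements back to its (n, c) pair, so every spatial position in
# channel c of sample n is multiplied by sigmoid(gate[n, c]).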
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_convolution_mul_sigmoid_1[grid(16)](buf3,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 1, 1), (4, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_2[grid(16)](buf6, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sigmoid_3[grid(256)](primals_1, buf6, buf7,
256, XBLOCK=128, num_warps=4, num_stages=1)
return buf7, primals_1, primals_2, primals_4, buf1, buf3, buf4, buf6
def swish(x):
return x * x.sigmoid()
class SENew(nn.Module):
"""Squeeze-and-Excitation block with Swish."""
def __init__(self, in_planes, se_planes):
super(SENew, self).__init__()
self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)
def forward(self, input_0):
primals_2 = self.se1.weight
primals_3 = self.se1.bias
primals_4 = self.se2.weight
primals_5 = self.se2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
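# Minimal usage sketch for the compiled module above (added for illustration,
# not part of the generated file): call() launches Triton kernels on cuda:0,
# so a CUDA device is assumed; shapes follow the asserts in call().
if __name__ == "__main__":
    model = SENew(in_planes=4, se_planes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    y = model(x)
    print(y.shape)  # torch.Size([4, 4, 4, 4]): x rescaled by per-channel gates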
| adarsh-kr/CVModelsBenchmark | SE | false | 3,019 | [
"MIT"
] | 0 | 85aeb76c7c796d86baa97e1272aea83d734665b5 | https://github.com/adarsh-kr/CVModelsBenchmark/tree/85aeb76c7c796d86baa97e1272aea83d734665b5 | import torch
import torch.nn as nn
import torch.nn.functional as F
def swish(x):
return x * x.sigmoid()
class Model(nn.Module):
"""Squeeze-and-Excitation block with Swish."""
def __init__(self, in_planes, se_planes):
super().__init__()
self.se1 = nn.Conv2d(in_planes, se_planes, kernel_size=1, bias=True)
self.se2 = nn.Conv2d(se_planes, in_planes, kernel_size=1, bias=True)
def forward(self, x):
out = F.adaptive_avg_pool2d(x, (1, 1))
out = swish(self.se1(out))
out = self.se2(out).sigmoid()
out = x * out
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
TransformerEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6s/c6sstbvcita246hkfqwdeatnmsh3e6vlcncrzcwlsoqg7dmxvabp.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
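# Math sketch for the kernel above (a restatement, not extra runtime code): for
# each 4-element row x of the (4, 4) input it produces the LayerNorm statistics
#   mean = x.mean(-1, keepdim=True)
#   rstd = (x.var(-1, unbiased=False, keepdim=True) + 1e-05).rsqrt()
# which the follow-up kernel then uses to normalize, scale and shift.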
# kernel path: runs/run_shard_7/inductor_cache/zv/czv3tzezwxkylzsgkrivaldxprnr7tvjr5iihe4mbc7bzdev5lsj.py
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# src => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ah/cahpqo3o7hv3q647n5lretlqvfljlubj4ic7gscxws4yvkm5jzff.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul_2
# Graph fragment:
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_2 = async_compile.triton('triton_poi_fused_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7s/c7spagnqvsgjrukyw5jujzjmswxuigeuvpyhxgdob766q2gfvgzr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__softmax_3 = async_compile.triton('triton_poi_fused__softmax_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dw/cdwqsjnh2osfmjr2utzzaqdg2vrfivzkuhareq3urgidllj2bsvr.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
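# Together the two kernels above implement a numerically stable softmax over the
# last dimension of the (4, 4, 4) attention scores:
#   softmax(x)_i = exp(x_i - max_j x_j) / sum_k exp(x_k - max_j x_j)
# _softmax_3 computes the shifted exponentials, _softmax_4 divides by their sum.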
# kernel path: runs/run_shard_7/inductor_cache/y5/cy5gjrtl7netbzcjhig66pdorub2vbq2qvwmv3tamld2ehimmlz7.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_5 = async_compile.triton('triton_poi_fused_clone_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
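# Illustrative sketch (not part of the generated Inductor output): the clone kernel above
# materializes the permuted attention output as a contiguous 4x4 tile, i.e. a transposed copy,
# so the following addmm can read it with plain row-major strides.
def _clone_permute_reference(attn_out):
    # attn_out: (4, 4) view of the (4, 4, 1) bmm result; returns its contiguous transpose
    return attn_out.t().contiguous()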
# kernel path: runs/run_shard_7/inductor_cache/ji/cjikooh3unjvssdwbmc5bbgrf7argvwkpdjikzfpajfrzpotlkhf.py
# Topologically Sorted Source Nodes: [src_1, src_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src_1 => add_2
# src_2 => var_mean_1
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_2, [1]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_6 = async_compile.triton('triton_poi_fused_add_native_layer_norm_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
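# Illustrative sketch (not part of the generated Inductor output): the fused kernel above
# computes the per-row mean and biased variance of the residual sum src + attn_out, matching
# var_mean(..., correction=0) in the graph fragment. A hypothetical eager-mode equivalent:
def _residual_layernorm_stats_reference(src, attn_out):
    s = src + attn_out
    return s.mean(dim=-1, keepdim=True), s.var(dim=-1, unbiased=False, keepdim=True)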
# kernel path: runs/run_shard_7/inductor_cache/j4/cj4vucbv6vxdldbfg73k3ixw2brnd6f754oxugjq3s7syrcrb4qe.py
# Topologically Sorted Source Nodes: [src_1, src_2], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# src_1 => add_2
# src_2 => add_3, add_4, mul_3, mul_4, rsqrt_1, sub_2
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_3,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_2, %getitem_9), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %rsqrt_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_3, %primals_8), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_7 = async_compile.triton('triton_poi_fused_add_native_layer_norm_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
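# Illustrative sketch (not part of the generated Inductor output): the kernel above consumes
# the mean and variance from the statistics kernel and finishes LayerNorm on src + attn_out:
# normalize with rsqrt(var + eps), then apply the affine weight and bias.
def _residual_layernorm_apply_reference(src, attn_out, mean, var, weight, bias, eps=1e-05):
    s = src + attn_out
    return (s - mean) * torch.rsqrt(var + eps) * weight + bias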
# kernel path: runs/run_shard_7/inductor_cache/ct/cct7fdnwiat77gmy2crh3kczskgz2h3fhqyydq4w5mawjn5eb6qf.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_11), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_8 = async_compile.triton('triton_poi_fused_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
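# Illustrative sketch (not part of the generated Inductor output): the kernel above fuses the
# bias add of linear1 with ReLU, rewriting the (4, 2048) matmul output in place.
def _bias_relu_reference(mm_out, bias):
    # mm_out: result of x @ linear1.weight.T; bias: (2048,) linear1 bias
    return torch.relu(mm_out + bias)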
# kernel path: runs/run_shard_7/inductor_cache/44/c444sh6bryz652bk24ocru63kbqhe67iwwzctt3isl7imfgv5iaa.py
# Topologically Sorted Source Nodes: [src_1, src_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# src_1 => add_2
# src_3 => add_5
# Graph fragment:
# %add_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %squeeze), kwargs = {})
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_tensor), kwargs = {})
triton_poi_fused_add_9 = async_compile.triton('triton_poi_fused_add_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
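# Illustrative sketch (not part of the generated Inductor output): the kernel above forms the
# final residual connection in place, adding the attention branch and the bias-corrected
# feed-forward output back onto the input, matching add_2 and add_5 in the graph fragment.
def _final_residual_reference(src, attn_out, ff_out, linear2_bias):
    return (src + attn_out) + (ff_out + linear2_bias)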
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (2048, 4), (4, 1))
assert_size_stride(primals_11, (2048, ), (1, ))
assert_size_stride(primals_12, (4, 2048), (2048, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src], Original ATen: [aten.native_layer_norm]
triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 16, grid=grid(16), stream=stream0)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
triton_poi_fused_mul_2.run(buf6, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1, 4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_3.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4, 1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_5.run(buf10, buf11, 4, 4, grid=grid(4, 4), stream=stream0)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1; del buf1 # reuse
buf14 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [src_1, src_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_6.run(primals_1, buf12, buf13, buf14, 4, grid=grid(4), stream=stream0)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src_1, src_2], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_7.run(primals_1, buf12, buf13, buf14, primals_8, primals_9, buf15, 16, grid=grid(16), stream=stream0)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 2048), (1, 4), 0), out=buf16)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_8.run(buf17, primals_11, 8192, grid=grid(8192), stream=stream0)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (2048, 4), (1, 2048), 0), out=buf18)
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [src_1, src_3], Original ATen: [aten.add]
triton_poi_fused_add_9.run(buf19, primals_1, buf12, primals_13, 16, grid=grid(16), stream=stream0)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12, primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (4, 1), 32), reinterpret_tensor(primals_4, (4, 4), (4, 1), 16), reinterpret_tensor(primals_4, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from typing import Optional
from torch import nn
def _get_activation_fn(activation: 'str'):
if activation == 'relu':
return nn.functional.relu
elif activation == 'gelu':
return nn.functional.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderLayer(nn.Module):
"""
Modified from torch.nn.TransformerEncoderLayer.
Add support of normalize_before,
i.e., use layer_norm before the first block.
Args:
d_model:
the number of expected features in the input (required).
nhead:
the number of heads in the multiheadattention models (required).
dim_feedforward:
the dimension of the feedforward network model (default=2048).
dropout:
the dropout value (default=0.1).
activation:
the activation function of intermediate layer, relu or
gelu (default=relu).
normalize_before:
whether to use layer_norm before the first block.
Examples::
>>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int'
=2048, dropout: 'float'=0.1, activation: 'str'='relu',
normalize_before: 'bool'=True) ->None:
super(TransformerEncoderLayer, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = nn.functional.relu
super(TransformerEncoderLayer, self).__setstate__(state)
def forward(self, src: 'torch.Tensor', src_mask:
'Optional[torch.Tensor]'=None, src_key_padding_mask:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""
Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional)
Shape:
src: (S, N, E).
src_mask: (S, S).
src_key_padding_mask: (N, S).
S is the source sequence length, T is the target sequence length,
N is the batch size, E is the feature number
"""
residual = src
if self.normalize_before:
src = self.norm1(src)
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = residual + self.dropout1(src2)
if not self.normalize_before:
src = self.norm1(src)
residual = src
if self.normalize_before:
src = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = residual + self.dropout2(src2)
if not self.normalize_before:
src = self.norm2(src)
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_mul_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_7(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
@triton.jit
def triton_poi_fused_relu_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_add_9(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (2048, 4), (4, 1))
assert_size_stride(primals_11, (2048,), (1,))
assert_size_stride(primals_12, (4, 2048), (2048, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(4)](primals_1, buf0, buf1,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_native_layer_norm_1[grid(16)](primals_1, buf0,
buf1, primals_2, primals_3, buf2, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del primals_2
del primals_3
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=
1, beta=1, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=
1, beta=1, out=buf5)
buf6 = reinterpret_tensor(buf3, (4, 4, 1), (1, 4, 16), 0)
del buf3
triton_poi_fused_mul_2[grid(16)](buf6, primals_5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf4, (4, 1, 4), (1, 1,
4), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_3[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_4[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf8
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf5, (4, 4, 1), (1, 4,
1), 0), out=buf10)
buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_5[grid(4, 4)](buf10, buf11, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf12 = reinterpret_tensor(buf10, (4, 4), (4, 1), 0)
del buf10
extern_kernels.addmm(primals_7, reinterpret_tensor(buf11, (4, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf12)
del primals_7
buf13 = buf1
del buf1
buf14 = buf0
del buf0
triton_poi_fused_add_native_layer_norm_6[grid(4)](primals_1, buf12,
buf13, buf14, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_7[grid(16)](primals_1, buf12,
buf13, buf14, primals_8, primals_9, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf13
del buf14
del primals_9
buf16 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf15, reinterpret_tensor(primals_10, (4, 2048),
(1, 4), 0), out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_relu_8[grid(8192)](buf17, primals_11, 8192, XBLOCK
=256, num_warps=4, num_stages=1)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf17, reinterpret_tensor(primals_12, (2048, 4),
(1, 2048), 0), out=buf18)
buf19 = buf18
del buf18
triton_poi_fused_add_9[grid(16)](buf19, primals_1, buf12,
primals_13, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_13
return (buf19, primals_1, primals_8, buf2, buf9, reinterpret_tensor(
buf11, (4, 4), (4, 1), 0), buf12, buf15, buf17, primals_12,
primals_10, primals_6, reinterpret_tensor(buf5, (4, 1, 4), (1, 1, 4
), 0), reinterpret_tensor(buf6, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf4, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 32),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 16),
reinterpret_tensor(primals_4, (4, 4), (4, 1), 0))
def _get_activation_fn(activation: 'str'):
if activation == 'relu':
return nn.functional.relu
elif activation == 'gelu':
return nn.functional.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class TransformerEncoderLayerNew(nn.Module):
"""
Modified from torch.nn.TransformerEncoderLayer.
Add support of normalize_before,
i.e., use layer_norm before the first block.
Args:
d_model:
the number of expected features in the input (required).
nhead:
the number of heads in the multiheadattention models (required).
dim_feedforward:
the dimension of the feedforward network model (default=2048).
dropout:
the dropout value (default=0.1).
activation:
the activation function of intermediate layer, relu or
gelu (default=relu).
normalize_before:
whether to use layer_norm before the first block.
Examples::
>>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int'
=2048, dropout: 'float'=0.1, activation: 'str'='relu',
normalize_before: 'bool'=True) ->None:
super(TransformerEncoderLayerNew, self).__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = nn.functional.relu
super(TransformerEncoderLayerNew, self).__setstate__(state)
def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.norm1.weight
        primals_3 = self.norm1.bias
        primals_4 = self.self_attn.in_proj_weight
        primals_5 = self.self_attn.in_proj_bias
        primals_6 = self.self_attn.out_proj.weight
        primals_7 = self.self_attn.out_proj.bias
        primals_8 = self.norm2.weight
        primals_9 = self.norm2.bias
        primals_10 = self.linear1.weight
        primals_11 = self.linear1.bias
        primals_12 = self.linear2.weight
        primals_13 = self.linear2.bias
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0]
| aarora8/icefall | TransformerEncoderLayer | false | 3,020 | [
"Apache-2.0"
] | 0 | 8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | https://github.com/aarora8/icefall/tree/8cb7f712e413fffbcdfdd865be73d6ff43f0ce7a | import torch
from typing import Optional
from torch import nn
def _get_activation_fn(activation: 'str'):
if activation == 'relu':
return nn.functional.relu
elif activation == 'gelu':
return nn.functional.gelu
raise RuntimeError('activation should be relu/gelu, not {}'.format(
activation))
class Model(nn.Module):
"""
Modified from torch.nn.TransformerEncoderLayer.
Add support of normalize_before,
i.e., use layer_norm before the first block.
Args:
d_model:
the number of expected features in the input (required).
nhead:
the number of heads in the multiheadattention models (required).
dim_feedforward:
the dimension of the feedforward network model (default=2048).
dropout:
the dropout value (default=0.1).
activation:
the activation function of intermediate layer, relu or
gelu (default=relu).
normalize_before:
whether to use layer_norm before the first block.
Examples::
>>> encoder_layer = TransformerEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model: 'int', nhead: 'int', dim_feedforward: 'int'
=2048, dropout: 'float'=0.1, activation: 'str'='relu',
normalize_before: 'bool'=True) ->None:
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=0.0)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = nn.functional.relu
        super().__setstate__(state)
def forward(self, src: 'torch.Tensor', src_mask:
'Optional[torch.Tensor]'=None, src_key_padding_mask:
'Optional[torch.Tensor]'=None) ->torch.Tensor:
"""
Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional)
Shape:
src: (S, N, E).
src_mask: (S, S).
src_key_padding_mask: (N, S).
S is the source sequence length, T is the target sequence length,
N is the batch size, E is the feature number
"""
residual = src
if self.normalize_before:
src = self.norm1(src)
src2 = self.self_attn(src, src, src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = residual + self.dropout1(src2)
if not self.normalize_before:
src = self.norm1(src)
residual = src
if self.normalize_before:
src = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = residual + self.dropout2(src2)
if not self.normalize_before:
src = self.norm2(src)
return src
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
SimpleFC | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/v6/cv6odvhmmcyvquog4eo62pdliew53orxzwe2wfzampr64jy3ppa7.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
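# Illustrative sketch (not part of the generated Inductor output): the kernel above adds the
# (4,) bias to the reshaped matmul output in place, broadcasting over the last dimension --
# the `matmul(input, FC) + FCbias` expression of the source module.
def _bias_add_reference(mm_out, bias):
    return mm_out + bias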
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf1, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
class SimpleFC(nn.Module):
def __init__(self, input_size, num_classes, name='SimpleFC'):
super(SimpleFC, self).__init__()
self.FC = nn.Parameter(torch.randn([input_size, num_classes]))
self.FCbias = nn.Parameter(torch.randn([num_classes]))
def forward(self, input):
return torch.matmul(input, self.FC) + self.FCbias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'num_classes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_3
return buf1, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
class SimpleFCNew(nn.Module):
def __init__(self, input_size, num_classes, name='SimpleFC'):
super(SimpleFCNew, self).__init__()
self.FC = nn.Parameter(torch.randn([input_size, num_classes]))
self.FCbias = nn.Parameter(torch.randn([num_classes]))
def forward(self, input_0):
primals_1 = self.FC
primals_3 = self.FCbias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| adityakusupati/EdgeML | SimpleFC | false | 3,021 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf | import torch
import torch.nn as nn
import torch.onnx
class Model(nn.Module):
def __init__(self, input_size, num_classes, name='SimpleFC'):
super().__init__()
self.FC = nn.Parameter(torch.randn([input_size, num_classes]))
self.FCbias = nn.Parameter(torch.randn([num_classes]))
def forward(self, input):
return torch.matmul(input, self.FC) + self.FCbias
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
DummyAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/v2/cv2xr6jl6gqjzdbzmj7aqb45fwhzry7vtuoppihgvgnpilaiccu6.py
# Topologically Sorted Source Nodes: [net_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# net_1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
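# Illustrative sketch (not part of the generated Inductor output): the kernel above fuses the
# bias add of the first linear layer with ReLU and also records the (output <= 0) mask that
# the threshold_backward pass will reuse. A hypothetical eager-mode equivalent:
def _bias_relu_with_mask_reference(mm_out, bias):
    out = torch.relu(mm_out + bias)
    return out, out <= 0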
# kernel path: runs/run_shard_7/inductor_cache/ts/ctscnzvbagjv4t25zui245b3recij5udu7nvujnr5rixcyo7elc6.py
# Topologically Sorted Source Nodes: [net_4], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# net_4 => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_4, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
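# Illustrative sketch (not part of the generated Inductor output): the kernel above is the
# first pass of a numerically stable softmax over rows of length 4 -- subtract the row max,
# then exponentiate.
def _softmax_exp_reference(scores):
    return torch.exp(scores - scores.amax(dim=-1, keepdim=True))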
# kernel path: runs/run_shard_7/inductor_cache/k6/ck6fz3qsfeqgn5jtm4ugikmu7cwvvlq3jpttijbb5kdniicwtyz6.py
# Topologically Sorted Source Nodes: [net_4], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# net_4 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
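# The two kernels above jointly implement a numerically stable softmax over the
# last dimension of a (N, 4) tensor: the first subtracts the row maximum and
# exponentiates, the second normalizes by the row sum. A minimal eager-mode
# sketch of the same computation, for reference only (the helper name is
# illustrative and not part of the generated module):
def _reference_softmax_rowwise(x):
    # x: (N, 4) float tensor; mirrors triton_poi_fused__softmax_1 followed by _2
    shifted = x - x.max(dim=1, keepdim=True).values   # amax + sub
    numer = shifted.exp()                             # exp
    return numer / numer.sum(dim=1, keepdim=True)     # sum + div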
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (32, 4), (4, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (1, 32), (32, 1))
assert_size_stride(primals_5, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 32), (128, 32, 1), 0); del buf0 # reuse
buf7 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [net_1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf7, 512, grid=grid(512), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [net_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 1), (1, 32), 0), alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [net_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf3, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [net_4], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [net_6], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0), primals_1, out=buf6)
del buf5
return (reinterpret_tensor(buf6, (4, 4), (4, 1), 0), primals_1, reinterpret_tensor(buf1, (16, 32), (32, 1), 0), buf3, primals_4, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class DummyAttention(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.linear1 = nn.Linear(in_channels, 32)
self.linear2 = nn.Linear(32, 1)
pass
def forward(self, net):
value = net
net = self.linear1(net)
net = F.relu(net)
net = self.linear2(net)
net = torch.flatten(net, 1)
net = F.softmax(net, dim=1)
net = torch.unsqueeze(net, dim=1)
net = torch.matmul(net, value)
net = torch.flatten(net, 1)
return net
pass
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (32, 4), (4, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (1, 32), (32, 1))
assert_size_stride(primals_5, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 32), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 32), (128, 32, 1), 0)
del buf0
buf7 = empty_strided_cuda((4, 4, 32), (128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(512)](buf1,
primals_3, buf7, 512, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (16, 32),
(32, 1), 0), reinterpret_tensor(primals_4, (32, 1), (1, 32), 0),
alpha=1, beta=1, out=buf3)
del primals_5
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(16)](buf3, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 1, 4), (4, 4, 1), 0)
del buf4
extern_kernels.bmm(reinterpret_tensor(buf5, (4, 1, 4), (4, 0, 1), 0
), primals_1, out=buf6)
del buf5
return reinterpret_tensor(buf6, (4, 4), (4, 1), 0
), primals_1, reinterpret_tensor(buf1, (16, 32), (32, 1), 0
), buf3, primals_4, buf7
class DummyAttentionNew(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.linear1 = nn.Linear(in_channels, 32)
self.linear2 = nn.Linear(32, 1)
pass
pass
def forward(self, input_0):
primals_2 = self.linear1.weight
primals_3 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| aaalgo/health_outcome_challenge | DummyAttention | false | 3,022 | [
"BSD-3-Clause"
] | 0 | 6d0cb660123ac6fb4939c1d50a8f1914e8e3e165 | https://github.com/aaalgo/health_outcome_challenge/tree/6d0cb660123ac6fb4939c1d50a8f1914e8e3e165 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.linear1 = nn.Linear(in_channels, 32)
self.linear2 = nn.Linear(32, 1)
pass
def forward(self, net):
value = net
net = self.linear1(net)
net = F.relu(net)
net = self.linear2(net)
net = torch.flatten(net, 1)
net = F.softmax(net, dim=1)
net = torch.unsqueeze(net, dim=1)
net = torch.matmul(net, value)
net = torch.flatten(net, 1)
return net
pass
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [4]
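# Minimal usage sketch (illustrative only; the helper name is not part of the
# original repo code): the module pools the sequence dimension with learned
# attention weights, so a (batch, seq, channels) input collapses to a
# (batch, channels) output.
def _example_usage():
    model = Model(4)                  # in_channels = 4, as in get_init_inputs()
    x = torch.rand([4, 4, 4])         # (batch=4, seq=4, channels=4)
    out = model(x)
    return out.shape                  # expected: torch.Size([4, 4])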
|
ProtoNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/dj/cdjhwfxa4rmj7sf4p4wvsuo75o54dwfcioekxuix47trjvtclo2h.py
# Topologically Sorted Source Nodes: [l2sim, l2sim_1, l2sim_2], Original ATen: [aten.sub, aten.pow, aten.sum]
# Source node to ATen node mapping:
# l2sim => sub
# l2sim_1 => pow_1
# l2sim_2 => sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vq/cvqpmuazxnwnsptaxlt2b7ipl2pprxtdxzwiesxonokjn253gipy.py
# Topologically Sorted Source Nodes: [gammal2sim, M, y, y_1], Original ATen: [aten.mul, aten.exp, aten.sum]
# Source node to ATen node mapping:
# M => exp
# gammal2sim => mul
# y => mul_1
# y_1 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, -16), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %exp), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [2]), kwargs = {})
triton_poi_fused_exp_mul_sum_1 = async_compile.triton('triton_poi_fused_exp_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = -16.0
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tmp8 = tmp7 * tmp2
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp6 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp13 * tmp2
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp12 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp18 * tmp21
tmp23 = tmp17 + tmp22
tl.store(out_ptr0 + (x2), tmp23, xmask)
''', device_str='cuda')
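# Together, the two kernels above evaluate the ProtoNN scoring rule
# y[n, l] = sum_j Z[l, j] * exp(-gamma^2 * ||WX[n] - B[:, j]||^2) with gamma = 4,
# which is where the hard-coded constant -16.0 comes from. A minimal eager-mode
# sketch of the same math, for reference only (the helper name is illustrative):
def _reference_protonn_scores(WX, B, Z, gamma=4.0):
    # WX: (N, d_cap) projected inputs, B: (d_cap, m) prototypes, Z: (L, m) labels
    l2sim = ((B.unsqueeze(0) - WX.unsqueeze(-1)) ** 2).sum(dim=1)   # (N, m)
    M = (-gamma * gamma * l2sim).exp()                              # (N, m)
    return M @ Z.t()                                                # (N, L)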
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [WX], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [l2sim, l2sim_1, l2sim_2], Original ATen: [aten.sub, aten.pow, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0.run(primals_2, buf0, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gammal2sim, M, y, y_1], Original ATen: [aten.mul, aten.exp, aten.sum]
triton_poi_fused_exp_mul_sum_1.run(primals_3, buf1, buf2, 256, grid=grid(256), stream=stream0)
return (buf2, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor(primals_4, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
import torch.onnx
class ProtoNN(nn.Module):
def __init__(self, inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma, W=None, B=None, Z=None):
"""
Forward computation graph for ProtoNN.
inputDimension: Input data dimension or feature dimension.
projectionDimension: hyperparameter
numPrototypes: hyperparameter
numOutputLabels: The number of output labels or classes
W, B, Z: Numpy matrices that can be used to initialize
            projection matrix (W), prototype matrix (B) and prototype labels
            matrix (Z).
Expected Dimensions:
W inputDimension (d) x projectionDimension (d_cap)
B projectionDimension (d_cap) x numPrototypes (m)
Z numOutputLabels (L) x numPrototypes (m)
"""
super(ProtoNN, self).__init__()
self.__d = inputDimension
self.__d_cap = projectionDimension
self.__m = numPrototypes
self.__L = numOutputLabels
self.W, self.B, self.Z = None, None, None
self.gamma = gamma
self.__validInit = False
self.__initWBZ(W, B, Z)
self.__validateInit()
def __validateInit(self):
        self.__validInit = False
errmsg = 'Dimensions mismatch! Should be W[d, d_cap]'
errmsg += ', B[d_cap, m] and Z[L, m]'
d, d_cap, m, L, _ = self.getHyperParams()
assert self.W.shape[0] == d, errmsg
assert self.W.shape[1] == d_cap, errmsg
assert self.B.shape[0] == d_cap, errmsg
assert self.B.shape[1] == m, errmsg
assert self.Z.shape[0] == L, errmsg
assert self.Z.shape[1] == m, errmsg
self.__validInit = True
def __initWBZ(self, inW, inB, inZ):
if inW is None:
self.W = torch.randn([self.__d, self.__d_cap])
self.W = nn.Parameter(self.W)
else:
self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32)))
if inB is None:
self.B = torch.randn([self.__d_cap, self.__m])
self.B = nn.Parameter(self.B)
else:
self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32)))
if inZ is None:
self.Z = torch.randn([self.__L, self.__m])
self.Z = nn.Parameter(self.Z)
else:
self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32)))
def getHyperParams(self):
"""
Returns the model hyperparameters:
[inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma]
"""
d = self.__d
dcap = self.__d_cap
m = self.__m
L = self.__L
return d, dcap, m, L, self.gamma
def getModelMatrices(self):
"""
Returns model matrices, which can then be evaluated to obtain
corresponding numpy arrays. These can then be exported as part of
        other implementations of ProtoNN, for instance a C++ implementation or
pure python implementation.
Returns
[ProjectionMatrix (W), prototypeMatrix (B),
prototypeLabelsMatrix (Z), gamma]
"""
return self.W, self.B, self.Z, self.gamma
def forward(self, X):
"""
This method is responsible for construction of the forward computation
graph. The end point of the computation graph, or in other words the
output operator for the forward computation is returned.
X: Input of shape [-1, inputDimension]
returns: The forward computation outputs, self.protoNNOut
"""
assert self.__validInit is True, 'Initialization failed!'
W, B, Z, gamma = self.W, self.B, self.Z, self.gamma
WX = torch.matmul(X, W)
dim = [-1, WX.shape[1], 1]
WX = torch.reshape(WX, dim)
dim = [1, B.shape[0], -1]
B_ = torch.reshape(B, dim)
l2sim = B_ - WX
l2sim = torch.pow(l2sim, 2)
l2sim = torch.sum(l2sim, dim=1, keepdim=True)
self.l2sim = l2sim
gammal2sim = -1 * gamma * gamma * l2sim
M = torch.exp(gammal2sim)
dim = [1] + list(Z.shape)
Z_ = torch.reshape(Z, dim)
y = Z_ * M
y = torch.sum(y, dim=2)
return y
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inputDimension': 4, 'projectionDimension': 4,
'numPrototypes': 4, 'numOutputLabels': 4, 'gamma': 4}]
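# Illustrative forward-shape sketch with the toy hyperparameters above (all 4):
# a (4, 4, 4, 4) input is flattened to 64 rows after the projection and reshape,
# so the score tensor comes out as (64, numOutputLabels). Helper name is illustrative.
def _example_forward():
    model = ProtoNN(inputDimension=4, projectionDimension=4, numPrototypes=4,
        numOutputLabels=4, gamma=4)
    X = torch.rand([4, 4, 4, 4])      # matches get_inputs()
    y = model(X)
    return y.shape                    # expected: torch.Size([64, 4])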
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_exp_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp13 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp2 = -16.0
tmp3 = tmp1 * tmp2
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp0 * tmp4
tmp8 = tmp7 * tmp2
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp6 * tmp9
tmp11 = tmp5 + tmp10
tmp14 = tmp13 * tmp2
tmp15 = tl_math.exp(tmp14)
tmp16 = tmp12 * tmp15
tmp17 = tmp11 + tmp16
tmp20 = tmp19 * tmp2
tmp21 = tl_math.exp(tmp20)
tmp22 = tmp18 * tmp21
tmp23 = tmp17 + tmp22
tl.store(out_ptr0 + x2, tmp23, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 1, 4), (4, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_sub_sum_0[grid(256)](primals_2, buf0, buf1,
256, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_exp_mul_sum_1[grid(256)](primals_3, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf2, buf1, primals_2, primals_3, buf0, buf1, reinterpret_tensor(
primals_4, (4, 64), (1, 4), 0)
class ProtoNNNew(nn.Module):
def __init__(self, inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma, W=None, B=None, Z=None):
"""
Forward computation graph for ProtoNN.
inputDimension: Input data dimension or feature dimension.
projectionDimension: hyperparameter
numPrototypes: hyperparameter
numOutputLabels: The number of output labels or classes
W, B, Z: Numpy matrices that can be used to initialize
            projection matrix (W), prototype matrix (B) and prototype labels
            matrix (Z).
Expected Dimensions:
W inputDimension (d) x projectionDimension (d_cap)
B projectionDimension (d_cap) x numPrototypes (m)
Z numOutputLabels (L) x numPrototypes (m)
"""
super(ProtoNNNew, self).__init__()
self.__d = inputDimension
self.__d_cap = projectionDimension
self.__m = numPrototypes
self.__L = numOutputLabels
self.W, self.B, self.Z = None, None, None
self.gamma = gamma
self.__validInit = False
self.__initWBZ(W, B, Z)
self.__validateInit()
def __validateInit(self):
        self.__validInit = False
errmsg = 'Dimensions mismatch! Should be W[d, d_cap]'
errmsg += ', B[d_cap, m] and Z[L, m]'
d, d_cap, m, L, _ = self.getHyperParams()
assert self.W.shape[0] == d, errmsg
assert self.W.shape[1] == d_cap, errmsg
assert self.B.shape[0] == d_cap, errmsg
assert self.B.shape[1] == m, errmsg
assert self.Z.shape[0] == L, errmsg
assert self.Z.shape[1] == m, errmsg
self.__validInit = True
def __initWBZ(self, inW, inB, inZ):
if inW is None:
self.W = torch.randn([self.__d, self.__d_cap])
self.W = nn.Parameter(self.W)
else:
self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32)))
if inB is None:
self.B = torch.randn([self.__d_cap, self.__m])
self.B = nn.Parameter(self.B)
else:
self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32)))
if inZ is None:
self.Z = torch.randn([self.__L, self.__m])
self.Z = nn.Parameter(self.Z)
else:
self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32)))
def getHyperParams(self):
"""
Returns the model hyperparameters:
[inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma]
"""
d = self.__d
dcap = self.__d_cap
m = self.__m
L = self.__L
return d, dcap, m, L, self.gamma
def getModelMatrices(self):
"""
Returns model matrices, which can then be evaluated to obtain
corresponding numpy arrays. These can then be exported as part of
        other implementations of ProtoNN, for instance a C++ implementation or
pure python implementation.
Returns
[ProjectionMatrix (W), prototypeMatrix (B),
prototypeLabelsMatrix (Z), gamma]
"""
return self.W, self.B, self.Z, self.gamma
def forward(self, input_0):
primals_1 = self.W
primals_2 = self.B
primals_3 = self.Z
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| adityakusupati/EdgeML | ProtoNN | false | 3,023 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf | import torch
import numpy as np
import torch.nn as nn
import torch.onnx
class Model(nn.Module):
def __init__(self, inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma, W=None, B=None, Z=None):
"""
Forward computation graph for ProtoNN.
inputDimension: Input data dimension or feature dimension.
projectionDimension: hyperparameter
numPrototypes: hyperparameter
numOutputLabels: The number of output labels or classes
W, B, Z: Numpy matrices that can be used to initialize
            projection matrix (W), prototype matrix (B) and prototype labels
            matrix (Z).
Expected Dimensions:
W inputDimension (d) x projectionDimension (d_cap)
B projectionDimension (d_cap) x numPrototypes (m)
Z numOutputLabels (L) x numPrototypes (m)
"""
super().__init__()
self.__d = inputDimension
self.__d_cap = projectionDimension
self.__m = numPrototypes
self.__L = numOutputLabels
self.W, self.B, self.Z = None, None, None
self.gamma = gamma
self.__validInit = False
self.__initWBZ(W, B, Z)
self.__validateInit()
def __validateInit(self):
        self.__validInit = False
errmsg = 'Dimensions mismatch! Should be W[d, d_cap]'
errmsg += ', B[d_cap, m] and Z[L, m]'
d, d_cap, m, L, _ = self.getHyperParams()
assert self.W.shape[0] == d, errmsg
assert self.W.shape[1] == d_cap, errmsg
assert self.B.shape[0] == d_cap, errmsg
assert self.B.shape[1] == m, errmsg
assert self.Z.shape[0] == L, errmsg
assert self.Z.shape[1] == m, errmsg
self.__validInit = True
def __initWBZ(self, inW, inB, inZ):
if inW is None:
self.W = torch.randn([self.__d, self.__d_cap])
self.W = nn.Parameter(self.W)
else:
self.W = nn.Parameter(torch.from_numpy(inW.astype(np.float32)))
if inB is None:
self.B = torch.randn([self.__d_cap, self.__m])
self.B = nn.Parameter(self.B)
else:
self.B = nn.Parameter(torch.from_numpy(inB.astype(np.float32)))
if inZ is None:
self.Z = torch.randn([self.__L, self.__m])
self.Z = nn.Parameter(self.Z)
else:
self.Z = nn.Parameter(torch.from_numpy(inZ.astype(np.float32)))
def getHyperParams(self):
"""
Returns the model hyperparameters:
[inputDimension, projectionDimension, numPrototypes,
numOutputLabels, gamma]
"""
d = self.__d
dcap = self.__d_cap
m = self.__m
L = self.__L
return d, dcap, m, L, self.gamma
def getModelMatrices(self):
"""
Returns model matrices, which can then be evaluated to obtain
corresponding numpy arrays. These can then be exported as part of
        other implementations of ProtoNN, for instance a C++ implementation or
pure python implementation.
Returns
[ProjectionMatrix (W), prototypeMatrix (B),
prototypeLabelsMatrix (Z), gamma]
"""
return self.W, self.B, self.Z, self.gamma
def forward(self, X):
"""
This method is responsible for construction of the forward computation
graph. The end point of the computation graph, or in other words the
output operator for the forward computation is returned.
X: Input of shape [-1, inputDimension]
returns: The forward computation outputs, self.protoNNOut
"""
assert self.__validInit is True, 'Initialization failed!'
W, B, Z, gamma = self.W, self.B, self.Z, self.gamma
WX = torch.matmul(X, W)
dim = [-1, WX.shape[1], 1]
WX = torch.reshape(WX, dim)
dim = [1, B.shape[0], -1]
B_ = torch.reshape(B, dim)
l2sim = B_ - WX
l2sim
# ... truncated (>4000 chars) for memory efficiency |
ReferenceWeightBinarizationModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4h/c4htmap5g5zaulvbnmkdpp4b27t63ovlzkkb2l6lw3bnkwk5l3al.py
# Topologically Sorted Source Nodes: [gt, type_1, mul, sign, abs_1, norm, output], Original ATen: [aten.gt, aten._to_copy, aten.mul, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# abs_1 => abs_1
# gt => gt
# mul => mul
# norm => mean
# output => mul_1
# sign => sub
# type_1 => convert_element_type
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%gt, torch.float32), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%abs_1, [1, 2, 3], True), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mean), kwargs = {})
triton_per_fused__to_copy_abs_gt_mean_mul_sub_0 = async_compile.triton('triton_per_fused__to_copy_abs_gt_mean_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_abs_gt_mean_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_abs_gt_mean_mul_sub_0(in_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 0.0
tmp7 = tmp0 > tmp6
tmp8 = tmp7.to(tl.float32)
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = 1.0
tmp12 = tmp10 - tmp11
tmp13 = 64.0
tmp14 = tmp5 / tmp13
tmp15 = tmp12 * tmp14
tl.store(out_ptr1 + (r1 + (64*x0)), tmp15, xmask)
''', device_str='cuda')
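# Eager-mode reference for the fused kernel above: for each of the 4 samples it
# takes the mean absolute value over dims [1, 2, 3], binarizes by sign, and
# rescales, matching the ATen graph in the comment. Purely illustrative; the
# generated call() below drives the actual Triton kernel.
def _reference_xnor_binarize(x):
    norm = x.abs().mean(dim=[1, 2, 3], keepdim=True)    # abs + mean
    sign = (x > 0).to(x.dtype) * 2 - 1                   # gt + _to_copy + mul + sub
    return sign * norm                                   # mul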
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gt, type_1, mul, sign, abs_1, norm, output], Original ATen: [aten.gt, aten._to_copy, aten.mul, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_abs_gt_mean_mul_sub_0.run(arg0_1, buf1, 4, 64, grid=grid(4), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
class ReferenceDOREFABinarize(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
norm = x.abs().mean()
sign = (x > 0).type(x.dtype) * 2 - 1
output_flat = sign * norm
return output_flat.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class ReferenceXNORBinarize(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
norm = x.abs().mean([1, 2, 3], keepdim=True)
sign = (x > 0).type(x.dtype) * 2 - 1
output = sign * norm
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class ReferenceWeightBinarizationModule(nn.Module):
def __init__(self, mode='xnor'):
super().__init__()
self.mode = mode
if self.mode == 'xnor':
self.binarize = ReferenceXNORBinarize.apply
elif self.mode == 'dorefa':
self.binarize = ReferenceDOREFABinarize.apply
def forward(self, input_):
return self.binarize(input_)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
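# Illustrative sketch of the two modes (helper name is not part of the original
# module code): 'xnor' scales each sample by its own mean absolute value over
# dims 1-3, while 'dorefa' scales every element by one global mean absolute value.
def _example_modes():
    x = torch.rand([4, 4, 4, 4]) - 0.5
    xnor_out = ReferenceWeightBinarizationModule('xnor')(x)
    dorefa_out = ReferenceWeightBinarizationModule('dorefa')(x)
    return xnor_out.shape, dorefa_out.shape   # both torch.Size([4, 4, 4, 4])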
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_abs_gt_mean_mul_sub_0(in_ptr0, out_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp1 = tl_math.abs(tmp0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(xmask, tmp2, 0)
tmp5 = tl.sum(tmp4, 1)[:, None]
tmp6 = 0.0
tmp7 = tmp0 > tmp6
tmp8 = tmp7.to(tl.float32)
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = 1.0
tmp12 = tmp10 - tmp11
tmp13 = 64.0
tmp14 = tmp5 / tmp13
tmp15 = tmp12 * tmp14
tl.store(out_ptr1 + (r1 + 64 * x0), tmp15, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_abs_gt_mean_mul_sub_0[grid(4)](arg0_1,
buf1, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class ReferenceDOREFABinarize(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
norm = x.abs().mean()
sign = (x > 0).type(x.dtype) * 2 - 1
output_flat = sign * norm
return output_flat.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class ReferenceXNORBinarize(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
norm = x.abs().mean([1, 2, 3], keepdim=True)
sign = (x > 0).type(x.dtype) * 2 - 1
output = sign * norm
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class ReferenceWeightBinarizationModuleNew(nn.Module):
def __init__(self, mode='xnor'):
super().__init__()
self.mode = mode
if self.mode == 'xnor':
self.binarize = ReferenceXNORBinarize.apply
elif self.mode == 'dorefa':
self.binarize = ReferenceDOREFABinarize.apply
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| JinYAnGHe/openvino_training_extensions | ReferenceWeightBinarizationModule | false | 3,024 | [
"Apache-2.0"
] | 0 | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
class ReferenceDOREFABinarize(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
norm = x.abs().mean()
sign = (x > 0).type(x.dtype) * 2 - 1
output_flat = sign * norm
return output_flat.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class ReferenceXNORBinarize(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
norm = x.abs().mean([1, 2, 3], keepdim=True)
sign = (x > 0).type(x.dtype) * 2 - 1
output = sign * norm
return output
@staticmethod
def backward(ctx, grad_output):
return grad_output
class Model(nn.Module):
def __init__(self, mode='xnor'):
super().__init__()
self.mode = mode
if self.mode == 'xnor':
self.binarize = ReferenceXNORBinarize.apply
elif self.mode == 'dorefa':
self.binarize = ReferenceDOREFABinarize.apply
def forward(self, input_):
return self.binarize(input_)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
StateInitZero | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7e/c7edgnsiuilw7uzwau7radvkvvtmowm7d7uh56mczbhieiykfrnx.py
# Topologically Sorted Source Nodes: [h0], Original ATen: [aten.new_zeros]
# Source node to ATen node mapping:
# h0 => full_default
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([1, 4, 4], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_new_zeros_0 = async_compile.triton('triton_poi_fused_new_zeros_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_new_zeros_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h0], Original ATen: [aten.new_zeros]
stream0 = get_raw_stream(0)
triton_poi_fused_new_zeros_0.run(buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [c0], Original ATen: [aten.new_zeros]
triton_poi_fused_new_zeros_0.run(buf1, 16, grid=grid(16), stream=stream0)
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
class StateInitZero(nn.Module):
def __init__(self, hidden_size, num_layers=1, batch_first=False):
super(StateInitZero, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
def forward(self, input: 'torch.Tensor'):
h0 = input.new_zeros((self.num_layers, input.size(0 if self.
batch_first else 1), self.hidden_size))
c0 = input.new_zeros((self.num_layers, input.size(0 if self.
batch_first else 1), self.hidden_size))
return h0, c0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
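# Illustrative pairing with nn.LSTM (the LSTM itself is an assumption made for
# this example; the module only allocates zero-filled state tensors): the
# returned (h0, c0) have the (num_layers, batch, hidden_size) shape an LSTM expects.
def _example_with_lstm():
    init = StateInitZero(hidden_size=4, num_layers=1, batch_first=False)
    lstm = nn.LSTM(input_size=4, hidden_size=4, num_layers=1)
    x = torch.rand(7, 3, 4)           # (seq_len=7, batch=3, input_size=4)
    h0, c0 = init(x)                  # both zero tensors of shape (1, 3, 4)
    out, _ = lstm(x, (h0, c0))
    return out.shape                  # expected: torch.Size([7, 3, 4])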
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = 0.0
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_new_zeros_0[grid(16)](buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf0, buf1
class StateInitZeroNew(nn.Module):
def __init__(self, hidden_size, num_layers=1, batch_first=False):
super(StateInitZeroNew, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1]
| JinYAnGHe/openvino_training_extensions | StateInitZero | false | 3,025 | [
"Apache-2.0"
] | 0 | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
class Model(nn.Module):
def __init__(self, hidden_size, num_layers=1, batch_first=False):
super().__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.batch_first = batch_first
def forward(self, input: 'torch.Tensor'):
h0 = input.new_zeros((self.num_layers, input.size(0 if self.
batch_first else 1), self.hidden_size))
c0 = input.new_zeros((self.num_layers, input.size(0 if self.
batch_first else 1), self.hidden_size))
return h0, c0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
FakeReLUM | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/k3/ck34u2ihhbnbcv7k6gttuj4jtoheyxbtbsnyfudkwvvrmxpo2zpp.py
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp]
# Source node to ATen node mapping:
# clamp => clamp_min
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 0), kwargs = {})
triton_poi_fused_clamp_0 = async_compile.triton('triton_poi_fused_clamp_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [clamp], Original ATen: [aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from typing import *
import torch.utils.data
class FakeReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
return grad_output
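# FakeReLU behaves like a straight-through estimator: the forward pass is a
# standard ReLU (clamp at 0), but backward returns grad_output unchanged, so
# gradients also flow through inputs that were clipped to zero.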
class FakeReLUM(nn.Module):
def forward(self, x):
return FakeReLU.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from typing import *
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clamp_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
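# Elementwise ReLU over the flattened (4, 4, 4, 4) input (xnumel = 256):
# each program instance handles XBLOCK contiguous elements, xmask guards the
# tail, and the body computes max(x, 0) into a fresh output buffer.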
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class FakeReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class FakeReLUMNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| agarwalsiddhant10/blackbox-smoothing | FakeReLUM | false | 3,026 | [
"MIT"
] | 0 | cf18a9dc45f807494955d0cf19a3d1dd4315b54f | https://github.com/agarwalsiddhant10/blackbox-smoothing/tree/cf18a9dc45f807494955d0cf19a3d1dd4315b54f | import torch
import torch.nn as nn
from typing import *
import torch.utils.data
class FakeReLU(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.clamp(min=0)
@staticmethod
def backward(ctx, grad_output):
return grad_output
class Model(nn.Module):
def forward(self, x):
return FakeReLU.apply(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
Conv2dSame | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fy/cfyjw5e45zcfxpduounab3jarehtt43xkfarqbghe4gsj2ddcfmo.py
# Topologically Sorted Source Nodes: [x_pad, x_relu], Original ATen: [aten.constant_pad_nd, aten.relu]
# Source node to ATen node mapping:
# x_pad => constant_pad_nd
# x_relu => relu
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 2, 1, 2], 0.0), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%constant_pad_nd,), kwargs = {})
triton_poi_fused_constant_pad_nd_relu_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_pad, x_relu], Original ATen: [aten.constant_pad_nd, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_relu_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super(Conv2dSame, self).__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
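# "Same"-style padding: total padding (out - 1) * stride + (F - 1) * D + 1 - in
# keeps the output at ceil(in / stride). For the 4x4 test input with F=4,
# S=1, D=1 this gives Pr = Pc = 3, split 1/2 per side, i.e. the 7x7 padded
# tensor seen in the generated kernel (4 * 4 * 7 * 7 = 784 elements).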
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
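# tmp10 is true only where the padded 7x7 coordinate (-1 offsets for the
# left/top pad of 1) maps inside the original 4x4 input; the masked load
# returns 0.0 elsewhere, and the maximum with 0 below applies the ReLU, so
# zero-padding and ReLU are fused into one elementwise pass.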
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_relu_0[grid(784)](primals_1, buf0,
784, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv2dSameNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super(Conv2dSameNew, self).__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, input_0):
primals_1 = input_0
primals_2 = self.layer.weight
primals_3 = self.layer.bias
output = call([primals_1, primals_2, primals_3])
return output[0]
| adityamehta00/HIDeGAN | Conv2dSame | false | 3,027 | [
"BSD-3-Clause"
] | 0 | 91a0674e092ccde2784a82bf927dfefd8673eb4c | https://github.com/adityamehta00/HIDeGAN/tree/91a0674e092ccde2784a82bf927dfefd8673eb4c | import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super().__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
VAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fp/cfp5jrxxyxrvhcpoq5tio3p5tkhj5ugdrpyur3x4v6meatzih7jn.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask)
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uw/cuwyaunsqgt6y446p46tklj3afnj5ynfwg2kxpptkdwfoiux233j.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/rf/crf4mhe3jx7leqalqrno2jhukq6yyyrwb7jxxlrjvfgbizyonfao.py
# Topologically Sorted Source Nodes: [mu, logvar, mul, std, eps, mul_1, z, out_4], Original ATen: [aten.convolution, aten.mul, aten.exp, aten.randn_like, aten.add]
# Source node to ATen node mapping:
# eps => randn
# logvar => convolution_3
# mu => convolution_2
# mul => mul
# mul_1 => mul_1
# out_4 => convolution_4
# std => exp
# z => add
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.5), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %randn : [num_users=2] = call_function[target=torch.ops.aten.randn.default](args = ([4, 4, 4, 4],), kwargs = {dtype: torch.float32, device: cuda:0, pin_memory: False})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_2, %mul_1), kwargs = {})
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%add, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_add_convolution_exp_mul_randn_like_2 = async_compile.triton('triton_poi_fused_add_convolution_exp_mul_randn_like_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_exp_mul_randn_like_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_exp_mul_randn_like_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp4 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2 + (16*y3)), xmask & ymask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = 0.5
tmp8 = tmp5 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp6 * tmp9
tmp11 = tmp2 + tmp10
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x2 + (16*y3)), tmp5, xmask & ymask)
tl.store(out_ptr2 + (y0 + (4*x2) + (64*y1)), tmp6, xmask & ymask)
tl.store(out_ptr3 + (x2 + (16*y3)), tmp11, xmask & ymask)
tl.store(out_ptr4 + (y0 + (4*x2) + (64*y1)), tmp11, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ur/curff7s5hkywtnymj6ypek5datxmdsklderihqz6yrgsqpmvwflf.py
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_8 => convolution_6
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 16], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (64*y1)), xmask & ymask)
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (16*y3)), tmp2, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (400, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (400, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_5, (400, ), (1, ))
assert_size_stride(primals_6, (4, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (400, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (400, ), (1, ))
assert_size_stride(primals_12, (400, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_13, (400, ), (1, ))
assert_size_stride(primals_14, (4, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 16, 16, grid=grid(16, 16), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_2, 25600, grid=grid(25600), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf4, primals_5, 25600, grid=grid(25600), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 1, 16, 4))
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf9 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf10 = buf9
del buf9
# Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 1, 16, 4))
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Topologically Sorted Source Nodes: [mu, logvar, mul, std, eps, mul_1, z, out_4], Original ATen: [aten.convolution, aten.mul, aten.exp, aten.randn_like, aten.add]
triton_poi_fused_add_convolution_exp_mul_randn_like_2.run(buf5, primals_7, buf7, primals_9, buf10, buf6, buf8, buf11, buf12, buf13, 16, 16, grid=grid(16, 16), stream=stream0)
del buf10
del buf5
del buf7
del primals_7
del primals_9
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [out_4, out_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf15, primals_11, 25600, grid=grid(25600), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf15, primals_12, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [out_6, out_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf17, primals_13, 25600, grid=grid(25600), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 4, 4), (64, 1, 16, 4))
buf19 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [out_8], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf18, primals_15, buf19, 16, 16, grid=grid(16, 16), stream=stream0)
del buf18
del primals_15
return (buf19, buf12, buf6, buf8, primals_1, buf0, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, buf2, buf4, buf8, buf11, buf12, buf15, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((400, 400, 1, 1), (400, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 400, 1, 1), (400, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 400, 1, 1), (400, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((400, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((400, 400, 1, 1), (400, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 400, 1, 1), (400, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.init as init
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
class VAE(nn.Module):
def __init__(self, length, n_latent):
super(VAE, self).__init__()
self.length = length
self.n_latent = n_latent
self.C1 = nn.Conv2d(self.length, 400, kernel_size=1)
self.C2 = nn.Conv2d(400, 400, kernel_size=1)
self.C31 = nn.Conv2d(400, self.n_latent, kernel_size=1)
self.C32 = nn.Conv2d(400, self.n_latent, kernel_size=1)
self.C4 = nn.Conv2d(self.n_latent, 400, kernel_size=1)
self.C5 = nn.Conv2d(400, 400, kernel_size=1)
self.C6 = nn.Conv2d(400, self.length, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
def weights_init(self):
for module in self.modules():
kaiming_init(module)
def encode(self, x):
out = self.C1(x)
out = self.relu(out)
out = self.C2(out)
out = self.relu(out)
mu = self.C31(out)
logvar = self.C32(out)
return mu, logvar
def reparameterize(self, mu, logvar):
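# Reparameterization trick: std = exp(0.5 * logvar), eps ~ N(0, I), and the
# sample z = mu + eps * std, so the stochastic node is isolated in eps and
# gradients can flow back through mu and logvar.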
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
out = self.C4(z)
out = self.relu(out)
out = self.C5(out)
out = self.relu(out)
out = self.C6(out)
return out
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), z, mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'length': 4, 'n_latent': 4}]
| import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
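# Pure layout change: read the contiguous NCHW input at x2 + 16 * y3 and
# store it with strides (64, 1, 16, 4) (channel innermost), the layout the
# following 1x1 convolutions consume.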
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask)
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
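# The conv output buffer is channel-innermost, so x0 = index % 400 is the
# output channel; the per-channel bias is broadcast over batch and the 4x4
# spatial grid, and the bias add plus ReLU run in place on the conv result.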
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_exp_mul_randn_like_2(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, out_ptr2, out_ptr3,
out_ptr4, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp4 = tl.load(in_ptr3 + y0, ymask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x2 + 16 * y3), xmask & ymask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp7 = 0.5
tmp8 = tmp5 * tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp6 * tmp9
tmp11 = tmp2 + tmp10
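# tmp2 = mu + bias, tmp5 = logvar + bias, tmp9 = exp(0.5 * logvar) = std and
# tmp11 = mu + eps * std: the two 1x1-conv bias adds are fused with the
# reparameterization. mu, logvar and z go to contiguous buffers for the
# caller; z is also stored in the permuted layout consumed by the decoder's
# first 1x1 convolution, and eps is kept in that layout as well.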
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
tl.store(out_ptr1 + (x2 + 16 * y3), tmp5, xmask & ymask)
tl.store(out_ptr2 + (y0 + 4 * x2 + 64 * y1), tmp6, xmask & ymask)
tl.store(out_ptr3 + (x2 + 16 * y3), tmp11, xmask & ymask)
tl.store(out_ptr4 + (y0 + 4 * x2 + 64 * y1), tmp11, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 64 * y1), xmask & ymask)
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 16 * y3), tmp2, xmask & ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (400, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (400, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_5, (400,), (1,))
assert_size_stride(primals_6, (4, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (400, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (400,), (1,))
assert_size_stride(primals_12, (400, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_13, (400,), (1,))
assert_size_stride(primals_14, (4, 400, 1, 1), (400, 1, 1, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(16, 16)](primals_3, buf0, 16, 16, XBLOCK=16,
YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(25600)](buf2, primals_2,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_1[grid(25600)](buf4, primals_5,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 4, 4), (64, 1, 16, 4))
buf9 = torch.ops.aten.randn.default([4, 4, 4, 4], dtype=torch.
float32, device=device(type='cuda', index=0), pin_memory=False)
buf10 = buf9
del buf9
buf7 = extern_kernels.convolution(buf4, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 4, 4, 4), (64, 1, 16, 4))
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 1, 16, 4), torch.float32)
triton_poi_fused_add_convolution_exp_mul_randn_like_2[grid(16, 16)](
buf5, primals_7, buf7, primals_9, buf10, buf6, buf8, buf11,
buf12, buf13, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4,
num_stages=1)
del buf10
del buf5
del buf7
del primals_7
del primals_9
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf15 = buf14
del buf14
triton_poi_fused_convolution_relu_1[grid(25600)](buf15, primals_11,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf16 = extern_kernels.convolution(buf15, primals_12, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 400, 4, 4), (6400, 1, 1600, 400))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_1[grid(25600)](buf17, primals_13,
25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_13
buf18 = extern_kernels.convolution(buf17, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 4, 4, 4), (64, 1, 16, 4))
buf19 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf13
triton_poi_fused_convolution_3[grid(16, 16)](buf18, primals_15,
buf19, 16, 16, XBLOCK=16, YBLOCK=16, num_warps=4, num_stages=1)
del buf18
del primals_15
return (buf19, buf12, buf6, buf8, primals_1, buf0, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, buf2, buf4, buf8,
buf11, buf12, buf15, buf17)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
class VAENew(nn.Module):
def __init__(self, length, n_latent):
super(VAENew, self).__init__()
self.length = length
self.n_latent = n_latent
self.C1 = nn.Conv2d(self.length, 400, kernel_size=1)
self.C2 = nn.Conv2d(400, 400, kernel_size=1)
self.C31 = nn.Conv2d(400, self.n_latent, kernel_size=1)
self.C32 = nn.Conv2d(400, self.n_latent, kernel_size=1)
self.C4 = nn.Conv2d(self.n_latent, 400, kernel_size=1)
self.C5 = nn.Conv2d(400, 400, kernel_size=1)
self.C6 = nn.Conv2d(400, self.length, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
def weights_init(self):
for module in self.modules():
kaiming_init(module)
def encode(self, x):
out = self.C1(x)
out = self.relu(out)
out = self.C2(out)
out = self.relu(out)
mu = self.C31(out)
logvar = self.C32(out)
return mu, logvar
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
out = self.C4(z)
out = self.relu(out)
out = self.C5(out)
out = self.relu(out)
out = self.C6(out)
return out
def forward(self, input_0):
primals_1 = self.C1.weight
primals_2 = self.C1.bias
primals_4 = self.C2.weight
primals_5 = self.C2.bias
primals_6 = self.C31.weight
primals_7 = self.C31.bias
primals_8 = self.C32.weight
primals_9 = self.C32.bias
primals_10 = self.C4.weight
primals_11 = self.C4.bias
primals_12 = self.C5.weight
primals_13 = self.C5.bias
primals_14 = self.C6.weight
primals_15 = self.C6.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0], output[1], output[2], output[3]
| aasensio/neural_hinode | VAE | false | 3,028 | [
"MIT"
] | 0 | 63ec076d920f82343618ce67669c73a3b5209957 | https://github.com/aasensio/neural_hinode/tree/63ec076d920f82343618ce67669c73a3b5209957 | import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.init as init
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
init.kaiming_normal(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
class Model(nn.Module):
def __init__(self, length, n_latent):
super().__init__()
self.length = length
self.n_latent = n_latent
self.C1 = nn.Conv2d(self.length, 400, kernel_size=1)
self.C2 = nn.Conv2d(400, 400, kernel_size=1)
self.C31 = nn.Conv2d(400, self.n_latent, kernel_size=1)
self.C32 = nn.Conv2d(400, self.n_latent, kernel_size=1)
self.C4 = nn.Conv2d(self.n_latent, 400, kernel_size=1)
self.C5 = nn.Conv2d(400, 400, kernel_size=1)
self.C6 = nn.Conv2d(400, self.length, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
def weights_init(self):
for module in self.modules():
kaiming_init(module)
def encode(self, x):
out = self.C1(x)
out = self.relu(out)
out = self.C2(out)
out = self.relu(out)
mu = self.C31(out)
logvar = self.C32(out)
return mu, logvar
def reparameterize(self, mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def decode(self, z):
out = self.C4(z)
out = self.relu(out)
out = self.C5(out)
out = self.relu(out)
out = self.C6(out)
return out
def forward(self, x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return self.decode(z), z, mu, logvar
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
UGRNNLRCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/id/cidxuzakshtvnrldbbqkyga2amymtgyw2cvpqv7d2kjchobiepep.py
# Topologically Sorted Source Nodes: [pre_comp1, pre_comp2, add_2, z, add_3, c, mul, sub, mul_1, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add_2 => add_2
# add_3 => add_3
# c => tanh
# mul => mul
# mul_1 => mul_1
# new_h => add_4
# pre_comp1 => add
# pre_comp2 => add_1
# sub => sub
# z => sigmoid
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %view_7), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_7), kwargs = {})
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_2,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_8), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_3,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + (x2), xmask)
tmp7 = tl.load(in_ptr2 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp5
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
tl.store(in_out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [uComp1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [uComp2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0), primals_6, out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pre_comp1, pre_comp2, add_2, z, add_3, c, mul, sub, mul_1, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf4, buf5, buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_7
del primals_8
return (buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
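# A minimal usage sketch of gen_nonlinearity, assuming only torch (imported above)
# and the function defined above; the helper name _gen_nonlinearity_demo is
# hypothetical and not part of the original EdgeML source.
def _gen_nonlinearity_demo():
    A = torch.linspace(-2.0, 2.0, steps=5)
    tanh_out = gen_nonlinearity(A, 'tanh')             # torch.tanh(A)
    quant_out = gen_nonlinearity(A, 'quantTanh')       # values clipped to [-1, 1]
    custom_out = gen_nonlinearity(A, lambda t: 2 * t)  # any callable is accepted
    return tanh_out, quant_out, custom_out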
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Function to get the aimed model size (in bytes)
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class UGRNNLRCell(RNNCell):
"""
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
UGRNN architecture and compression techniques are found in
UGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
    h_t^ = update_nl(W2x_t + U2h_{t-1} + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can be further parameterised into low-rank versions by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='UGRNNLR'):
super(UGRNNLRCell, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'UGRNNLR'
def forward(self, input, state):
if self._wRank is None:
wComp1 = torch.matmul(input, self.W1)
wComp2 = torch.matmul(input, self.W2)
else:
wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1)
wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2)
if self._uRank is None:
uComp1 = torch.matmul(state, self.U1)
uComp2 = torch.matmul(state, self.U2)
else:
uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1)
uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2)
pre_comp1 = wComp1 + uComp1
pre_comp2 = wComp2 + uComp2
        z = gen_nonlinearity(pre_comp1 + self.bias_gate,
            self._gate_nonlinearity)
        c = gen_nonlinearity(pre_comp2 + self.bias_update,
            self._update_nonlinearity)
new_h = z * state + (1.0 - z) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 2:
Vars.extend([self.W1, self.W2])
else:
Vars.extend([self.W, self.W1, self.W2])
if self._num_U_matrices == 2:
Vars.extend([self.U1, self.U2])
else:
Vars.extend([self.U, self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
return Vars
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
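# A minimal usage sketch for the cell above, assuming the UGRNNLRCell, get_inputs
# and get_init_inputs defined in this file; the helper name is hypothetical and
# not part of the original EdgeML source. Uses the full-rank path (wRank=None,
# uRank=None), which matches the compiled graph in this record.
def _ugrnn_cell_demo():
    cell = UGRNNLRCell(input_size=4, hidden_size=4)
    x, h = get_inputs()
    new_h = cell(x, h)  # z * h + (1 - z) * tanh(pre_comp2 + bias_update)
    return new_h.shape  # torch.Size([4, 4, 4, 4])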
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp5
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (64, 4), (4, 1), 0),
primals_6, out=buf3)
del primals_6
buf4 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf4, buf5,
buf2, primals_7, buf3, primals_8, primals_5, buf6, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_7
del primals_8
return buf6, primals_5, buf4, buf5, reinterpret_tensor(primals_2, (4,
64), (1, 4), 0)
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Function to get the aimed model size (in bytes)
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class UGRNNLRCellNew(RNNCell):
"""
UGRNN LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 3 matrices if not None else creates 2 matrices)
uRank = rank of U matrix
(creates 3 matrices if not None else creates 2 matrices)
UGRNN architecture and compression techniques are found in
UGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(W1x_t + U1h_{t-1} + B_g)
    h_t^ = update_nl(W2x_t + U2h_{t-1} + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can be further parameterised into low-rank versions by
Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='UGRNNLR'):
super(UGRNNLRCellNew, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 2, 2, 2, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'UGRNNLR'
def getVars(self):
Vars = []
if self._num_W_matrices == 2:
Vars.extend([self.W1, self.W2])
else:
Vars.extend([self.W, self.W1, self.W2])
if self._num_U_matrices == 2:
Vars.extend([self.U1, self.U2])
else:
Vars.extend([self.U, self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W1
primals_3 = self.W2
primals_4 = self.U1
primals_6 = self.U2
primals_7 = self.bias_gate
primals_8 = self.bias_update
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
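# A minimal correctness sketch, assuming a CUDA device and CUDA float tensors x, h
# of shape (4, 4, 4, 4); the helper name is hypothetical and not part of the
# generated code. It recomputes in eager PyTorch what the fused Triton kernel
# above produces: z = sigmoid(x@W1 + h@U1 + b_g), c = tanh(x@W2 + h@U2 + b_u),
# new_h = z*h + (1-z)*c.
def _check_compiled_cell(x, h):
    cell = UGRNNLRCellNew(input_size=4, hidden_size=4).cuda()
    z = torch.sigmoid(x @ cell.W1 + h @ cell.U1 + cell.bias_gate)
    c = torch.tanh(x @ cell.W2 + h @ cell.U2 + cell.bias_update)
    ref = z * h + (1.0 - z) * c
    return torch.allclose(cell(x, h), ref, atol=1e-5)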
| adityakusupati/EdgeML | UGRNNLRCell | false | 3,029 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf | import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super().__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Function to get the aimed model size (in bytes)
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
# ... truncated (>4000 chars) for memory efficiency |
LSTMPredictor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ky/cky64l574tkwxzjewzevqyhty73x4t3q4p6d2tu2humfvstjwiaa.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 32), (32, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf3, 2048, grid=grid(2048), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32), (32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 32), (32, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LSTMPredictor(nn.Module):
def __init__(self, look_back, target_days):
super(LSTMPredictor, self).__init__()
self.layer_a = nn.Linear(look_back, 32)
self.relu = nn.ReLU()
self.output = nn.Linear(32, target_days)
def predict(self, input):
with torch.no_grad():
return self.forward(input).tolist()
def forward(self, input):
logits = self.output(self.relu(self.layer_a(input)))
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'look_back': 4, 'target_days': 4}]
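# A minimal usage sketch, assuming the LSTMPredictor class and shapes defined
# above; the helper name is hypothetical. Despite the class name, this model is a
# plain Linear(look_back, 32) -> ReLU -> Linear(32, target_days) feed-forward net.
def _lstm_predictor_demo():
    model = LSTMPredictor(look_back=4, target_days=4)
    x = torch.rand(4, 4, 4, 4)
    logits = model(x)         # shape (4, 4, 4, 4); Linear acts on the last dim
    preds = model.predict(x)  # nested Python lists, computed under no_grad
    return logits.shape, type(preds)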
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (32, 4), (4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 32), (32, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 32), (32, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 32), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 32), (512, 128, 32, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 32), (512, 128, 32, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(2048)](buf1,
primals_2, buf3, 2048, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 32),
(32, 1), 0), reinterpret_tensor(primals_4, (32, 4), (1, 32), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 32), (32, 1), 0), primals_4, buf3
class LSTMPredictorNew(nn.Module):
def __init__(self, look_back, target_days):
super(LSTMPredictorNew, self).__init__()
self.layer_a = nn.Linear(look_back, 32)
self.relu = nn.ReLU()
self.output = nn.Linear(32, target_days)
def predict(self, input):
with torch.no_grad():
return self.forward(input).tolist()
def forward(self, input_0):
primals_1 = self.layer_a.weight
primals_2 = self.layer_a.bias
primals_4 = self.output.weight
primals_5 = self.output.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
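# A minimal eager reference for the fused kernel above, assuming a model instance
# with layer_a/output and an input of shape (4, 4, 4, 4); the helper name is
# hypothetical. It mirrors call(): the first Linear, ReLU (buf1), the `<= 0` mask
# saved for the ReLU backward (buf3), then the output Linear (buf2).
def _fused_relu_reference(model, x):
    hidden = torch.relu(model.layer_a(x))
    mask = hidden <= 0.0
    logits = model.output(hidden)
    return logits, mask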
| Yu-Hao-88/NN_stock_prediction | LSTMPredictor | false | 3,030 | [
"MIT"
] | 0 | bb84a3f4450d95af317d60d83dcd53ad4f3d350d | https://github.com/Yu-Hao-88/NN_stock_prediction/tree/bb84a3f4450d95af317d60d83dcd53ad4f3d350d | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, look_back, target_days):
super().__init__()
self.layer_a = nn.Linear(look_back, 32)
self.relu = nn.ReLU()
self.output = nn.Linear(32, target_days)
def predict(self, input):
with torch.no_grad():
return self.forward(input).tolist()
def forward(self, input):
logits = self.output(self.relu(self.layer_a(input)))
return logits
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ReconstructionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [x_relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super(Conv2dSame, self).__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
class ReconstructionBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.
BatchNorm2d, use_dropout=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ReconstructionBlock, self).__init__()
self.C1R1 = Conv2dSame(input_nc, output_nc, kernel_size=1)
def forward(self, input):
"""Forward function (with skip connections)"""
return self.C1R1(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'output_nc': 4}]
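# A minimal usage sketch, assuming the classes above; the helper name is
# hypothetical. Worked 'same'-padding arithmetic for the shapes used here
# (H = W = 4, kernel_size = 1, stride = 1, dilation = 1):
#   H2 = ceil(4 / 1) = 4, Pr = (4 - 1)*1 + (1 - 1)*1 + 1 - 4 = 0, and Pc = 0,
# so ZeroPad2d((0, 0, 0, 0)) is a no-op and the spatial size is preserved. Note
# that for kernel_size > 1 the code pads the width axis with Pr (derived from H)
# and the height axis with Pc (derived from W); with square inputs these agree.
def _reconstruction_block_demo():
    block = ReconstructionBlock(input_nc=4, output_nc=4)
    x = torch.rand(4, 4, 4, 4)
    return block(x).shape  # torch.Size([4, 4, 4, 4])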
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super(Conv2dSame, self).__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
class ReconstructionBlockNew(nn.Module):
"""Define a Resnet block"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.
BatchNorm2d, use_dropout=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ReconstructionBlockNew, self).__init__()
self.C1R1 = Conv2dSame(input_nc, output_nc, kernel_size=1)
def forward(self, input_0):
primals_2 = self.C1R1.layer.weight
primals_3 = self.C1R1.layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
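# A minimal eager reference for call() above, assuming a CUDA device, a
# ReconstructionBlockNew instance on CUDA and a CUDA input of shape (4, 4, 4, 4);
# the helper name is hypothetical. With kernel_size=1 the 'same' padding is zero,
# so the compiled graph reduces to a pre-activation ReLU followed by the 1x1 conv.
def _reconstruction_reference(block, x):
    return block.C1R1.layer(torch.relu(x))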
| adityamehta00/HIDeGAN | ReconstructionBlock | false | 3,031 | [
"BSD-3-Clause"
] | 0 | 91a0674e092ccde2784a82bf927dfefd8673eb4c | https://github.com/adityamehta00/HIDeGAN/tree/91a0674e092ccde2784a82bf927dfefd8673eb4c | import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super().__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
class Model(nn.Module):
"""Define a Resnet block"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.
BatchNorm2d, use_dropout=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super().__init__()
self.C1R1 = Conv2dSame(input_nc, output_nc, kernel_size=1)
def forward(self, input):
"""Forward function (with skip connections)"""
return self.C1R1(input)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
FastRNNCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/tb/ctbc62iwsfjylunbfj67j7mqt6piotcv6s56drmljjg2lp7vfyhv.py
# Topologically Sorted Source Nodes: [pre_comp, add_1, c, sigmoid, mul, sigmoid_1, mul_1, new_h], Original ATen: [aten.add, aten.tanh, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# c => tanh
# mul => mul
# mul_1 => mul_1
# new_h => add_2
# pre_comp => add
# sigmoid => sigmoid
# sigmoid_1 => sigmoid_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_1,), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_6,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_7,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %tanh), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + (x2), xmask)
tmp11 = tl.load(in_ptr4 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp13 = tl.sigmoid(tmp12)
tmp14 = tmp13 * tmp5
tmp15 = tmp10 + tmp14
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, 1), (1, 1))
assert_size_stride(primals_7, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [uComp], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pre_comp, add_1, c, sigmoid, mul, sigmoid_1, mul_1, new_h], Original ATen: [aten.add, aten.tanh, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_tanh_0.run(buf2, buf1, primals_5, primals_6, primals_4, primals_7, buf3, 256, grid=grid(256), stream=stream0)
del buf1
del primals_5
return (buf3, primals_4, primals_6, primals_7, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class FastRNNCell(RNNCell):
"""
FastRNN Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix (creates two matrices if not None)
uRank = rank of U matrix (creates two matrices if not None)
wSparsity = intended sparsity of W matrix(ces)
uSparsity = intended sparsity of U matrix(ces)
Warning:
The Cell will not automatically sparsify.
The user must invoke .sparsify to hard threshold.
alphaInit = init for alpha, the update scalar
betaInit = init for beta, the weight for previous state
FastRNN architecture and compression techniques are found in
FastGRNN(LINK) paper
Basic architecture is like:
h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^
    W and U can be further parameterised into low-rank versions by
W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
"""
def __init__(self, input_size, hidden_size, update_nonlinearity='tanh',
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=-
3.0, betaInit=3.0, name='FastRNN'):
super(FastRNNCell, self).__init__(input_size, hidden_size, None,
update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity)
self._alphaInit = alphaInit
self._betaInit = betaInit
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
else:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])
)
else:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1]))
self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1]))
@property
def name(self):
return self._name
@property
def cellType(self):
return 'FastRNN'
def forward(self, input, state):
if self._wRank is None:
wComp = torch.matmul(input, self.W)
else:
wComp = torch.matmul(torch.matmul(input, self.W1), self.W2)
if self._uRank is None:
uComp = torch.matmul(state, self.U)
else:
uComp = torch.matmul(torch.matmul(state, self.U1), self.U2)
pre_comp = wComp + uComp
c = gen_nonlinearity(pre_comp + self.bias_update, self.
_update_nonlinearity)
new_h = torch.sigmoid(self.beta) * state + torch.sigmoid(self.alpha
) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 1:
Vars.append(self.W)
else:
Vars.extend([self.W1, self.W2])
if self._num_U_matrices == 1:
Vars.append(self.U)
else:
Vars.extend([self.U1, self.U2])
Vars.extend([self.bias_update])
Vars.extend([self.alpha, self.beta])
return Vars
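# Usage sketch (an assumption for illustration, not part of the original file):
# unrolling the cell over a sequence of shape [T, batch, input_size] starting from
# a zero state; the helper name `run_fastrnn_over_sequence` is hypothetical.
def run_fastrnn_over_sequence(cell, inputs):
    state = torch.zeros(inputs.size(1), cell.state_size, device=inputs.device)
    for t in range(inputs.size(0)):
        state = cell(inputs[t], state)
    return state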
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK])
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp11 = tl.load(in_ptr4 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = libdevice.tanh(tmp4)
tmp8 = tl.sigmoid(tmp7)
tmp10 = tmp8 * tmp9
tmp13 = tl.sigmoid(tmp12)
tmp14 = tmp13 * tmp5
tmp15 = tmp10 + tmp14
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, 1), (1, 1))
assert_size_stride(primals_7, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_tanh_0[grid(256)](buf2, buf1,
primals_5, primals_6, primals_4, primals_7, buf3, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del buf1
del primals_5
return buf3, primals_4, primals_6, primals_7, buf2, reinterpret_tensor(
primals_2, (4, 64), (1, 4), 0)
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class FastRNNCellNew(RNNCell):
"""
FastRNN Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix (creates two matrices if not None)
uRank = rank of U matrix (creates two matrices if not None)
wSparsity = intended sparsity of W matrix(ces)
uSparsity = intended sparsity of U matrix(ces)
Warning:
The Cell will not automatically sparsify.
The user must invoke .sparsify to hard threshold.
alphaInit = init for alpha, the update scalar
betaInit = init for beta, the weight for previous state
FastRNN architecture and compression techniques are found in
FastGRNN(LINK) paper
Basic architecture is like:
h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
h_t = sigmoid(beta)*h_{t-1} + sigmoid(alpha)*h_t^
    W and U can be further parameterised into low-rank versions by
W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
"""
def __init__(self, input_size, hidden_size, update_nonlinearity='tanh',
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0, alphaInit=-
3.0, betaInit=3.0, name='FastRNN'):
super(FastRNNCellNew, self).__init__(input_size, hidden_size, None,
update_nonlinearity, 1, 1, 1, wRank, uRank, wSparsity, uSparsity)
self._alphaInit = alphaInit
self._betaInit = betaInit
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
else:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])
)
else:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self.alpha = nn.Parameter(self._alphaInit * torch.ones([1, 1]))
self.beta = nn.Parameter(self._betaInit * torch.ones([1, 1]))
@property
def name(self):
return self._name
@property
def cellType(self):
return 'FastRNN'
def getVars(self):
Vars = []
if self._num_W_matrices == 1:
Vars.append(self.W)
else:
Vars.extend([self.W1, self.W2])
if self._num_U_matrices == 1:
Vars.append(self.U)
else:
Vars.extend([self.U1, self.U2])
Vars.extend([self.bias_update])
Vars.extend([self.alpha, self.beta])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W
primals_3 = self.U
primals_5 = self.bias_update
primals_6 = self.alpha
primals_7 = self.beta
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| adityakusupati/EdgeML | FastRNNCell | false | 3,032 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf | import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super().__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
# ... truncated (>4000 chars) for memory efficiency |
LocallyConnectedLayer1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7k/c7krdcm5pwx4sfbo4gtruaxtypgcvwe3uwc7sp564j4ubtch63ke.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [0, 3], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = (xindex // 7)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + (4*x1)), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/52/c52rlzit6alxkn2xl3xucbea2dque55ic3yvcbpctw5tc7viuwtv.py
# Topologically Sorted Source Nodes: [mul, out], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# mul => mul
# out => sum_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unfold, %primals_2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [4]), kwargs = {})
triton_poi_fused_mul_sum_1 = async_compile.triton('triton_poi_fused_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (7*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + x0 + (7*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + x0 + (7*x1)), xmask)
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + x0 + (7*x1)), xmask)
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
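# Eager-mode reference of what the two kernels above compute — an illustrative
# sketch, not emitted by Inductor; the helper name `locally_connected_reference`
# is assumed. Zero-pad the last dim from 4 to 7, take size-4 windows with stride 1
# (the windowing is folded into the index arithmetic of the second kernel), then
# reduce each window against the matching weight row.
def locally_connected_reference(x, weight):
    import torch.nn.functional as F
    padded = F.pad(x, (0, 3))                       # triton_poi_fused_constant_pad_nd_0
    windows = padded.unfold(padded.dim() - 1, 4, 1)
    return (windows * weight).sum(dim=-1)           # triton_poi_fused_mul_sum_1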
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 448, grid=grid(448), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, out], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_1.run(buf0, primals_2, buf1, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf1, reinterpret_tensor(buf0, (4, 4, 4, 4, 4), (112, 28, 7, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class LocallyConnectedLayer1d(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
True, bias=False):
"""
Defines one locally connected layer for one dimensional vector input.
NOTE: This model only takes one-dimensional inputs. Batched inputs are also fine.
input_dim: column size of weight matrix
output_dim: row size of weight matrix
kernel_size: number of local connections per parameter
stride: number of strides of local connections, CANNOT BE ZERO
padding: whether or not to zero pad
bias: whether or not to have a bias term
"""
super(LocallyConnectedLayer1d, self).__init__()
initrange = 0.1
self.weight = nn.Parameter(torch.empty(output_dim, kernel_size))
torch.nn.init.uniform_(self.weight, -initrange, initrange)
if bias:
self.bias = nn.Parameter(torch.zeros(output_dim))
else:
self.register_parameter('bias', None)
self.kernel_size = kernel_size
self.stride = stride
if padding is True:
pad_size = stride * (output_dim - 1) + kernel_size - input_dim
self.input_dim = input_dim + pad_size
self.pad = True
else:
resulting_dim = (input_dim - kernel_size) / stride + 1
self.extra_dim = output_dim - resulting_dim
self.pad = False
def forward(self, x):
k = self.kernel_size
s = self.stride
instance_dim = len(x.size()) - 1
if self.pad:
pad_size = self.input_dim - x.size()[instance_dim]
if pad_size >= 0:
pad = 0, pad_size
x = F.pad(x, pad=pad)
x = x.unfold(instance_dim, k, s)
else:
x = x.unfold(instance_dim, k, s)
if instance_dim == 0:
x = x[:pad_size]
else:
x = x[:, :pad_size, :]
else:
x = x.unfold(instance_dim, k, s)
            for i in range(int(self.extra_dim)):
                if instance_dim == 0:
                    x = torch.cat((x, x[-1:]), dim=instance_dim)
                else:
                    x = torch.cat((x, x[:, -1:, :]), dim=instance_dim)
out = torch.sum(x * self.weight, dim=instance_dim + 1)
if self.bias is not None:
out += self.bias
return out
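# Usage sketch (assumed, not from the original file): with the padded configuration
# each of the `output_dim` units sees `kernel_size` consecutive (zero-padded)
# inputs starting at multiples of `stride`; the demo function name is made up.
def _demo_locally_connected():
    layer = LocallyConnectedLayer1d(input_dim=4, output_dim=4, kernel_size=4,
        stride=1)
    x = torch.rand(8, 4)
    return layer(x)  # shape (8, 4)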
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4,
'stride': 1}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 448
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = xindex // 7
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 4, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp2 & xmask, other=0.0)
tl.store(out_ptr0 + x2, tmp3, xmask)
@triton.jit
def triton_poi_fused_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 7 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + x0 + 7 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + x0 + 7 * x1), xmask)
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + x0 + 7 * x1), xmask)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 7), (112, 28, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(448)](primals_1, buf0, 448,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_mul_sum_1[grid(256)](buf0, primals_2, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, reinterpret_tensor(buf0, (4, 4, 4, 4, 4), (112, 28, 7, 1,
1), 0)
class LocallyConnectedLayer1dNew(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
True, bias=False):
"""
Defines one locally connected layer for one dimensional vector input.
NOTE: This model only takes one-dimensional inputs. Batched inputs are also fine.
input_dim: column size of weight matrix
output_dim: row size of weight matrix
kernel_size: number of local connections per parameter
stride: number of strides of local connections, CANNOT BE ZERO
padding: whether or not to zero pad
bias: whether or not to have a bias term
"""
super(LocallyConnectedLayer1dNew, self).__init__()
initrange = 0.1
self.weight = nn.Parameter(torch.empty(output_dim, kernel_size))
torch.nn.init.uniform_(self.weight, -initrange, initrange)
if bias:
self.bias = nn.Parameter(torch.zeros(output_dim))
else:
self.register_parameter('bias', None)
self.kernel_size = kernel_size
self.stride = stride
if padding is True:
pad_size = stride * (output_dim - 1) + kernel_size - input_dim
self.input_dim = input_dim + pad_size
self.pad = True
else:
resulting_dim = (input_dim - kernel_size) / stride + 1
self.extra_dim = output_dim - resulting_dim
self.pad = False
def forward(self, input_0):
primals_2 = self.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| adityayedetore/LCRNN | LocallyConnectedLayer1d | false | 3,033 | [
"MIT"
] | 0 | 7b6afaf6098fed584b90fe0196cfd26aa6a190c5 | https://github.com/adityayedetore/LCRNN/tree/7b6afaf6098fed584b90fe0196cfd26aa6a190c5 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.onnx
class Model(nn.Module):
def __init__(self, input_dim, output_dim, kernel_size, stride, padding=
True, bias=False):
"""
Defines one locally connected layer for one dimensional vector input.
NOTE: This model only takes one-dimensional inputs. Batched inputs are also fine.
input_dim: column size of weight matrix
output_dim: row size of weight matrix
kernel_size: number of local connections per parameter
stride: number of strides of local connections, CANNOT BE ZERO
padding: whether or not to zero pad
bias: whether or not to have a bias term
"""
super().__init__()
initrange = 0.1
self.weight = nn.Parameter(torch.empty(output_dim, kernel_size))
torch.nn.init.uniform_(self.weight, -initrange, initrange)
if bias:
self.bias = nn.Parameter(torch.zeros(output_dim))
else:
self.register_parameter('bias', None)
self.kernel_size = kernel_size
self.stride = stride
if padding is True:
pad_size = stride * (output_dim - 1) + kernel_size - input_dim
self.input_dim = input_dim + pad_size
self.pad = True
else:
resulting_dim = (input_dim - kernel_size) / stride + 1
self.extra_dim = output_dim - resulting_dim
self.pad = False
def forward(self, x):
k = self.kernel_size
s = self.stride
instance_dim = len(x.size()) - 1
if self.pad:
pad_size = self.input_dim - x.size()[instance_dim]
if pad_size >= 0:
pad = 0, pad_size
x = F.pad(x, pad=pad)
x = x.unfold(instance_dim, k, s)
else:
x = x.unfold(instance_dim, k, s)
if instance_dim == 0:
x = x[:pad_size]
else:
x = x[:, :pad_size, :]
else:
x = x.unfold(instance_dim, k, s)
            for i in range(int(self.extra_dim)):
                if instance_dim == 0:
                    x = torch.cat((x, x[-1:]), dim=instance_dim)
                else:
                    x = torch.cat((x, x[:, -1:, :]), dim=instance_dim)
out = torch.sum(x * self.weight, dim=instance_dim + 1)
if self.bias is not None:
out += self.bias
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4,
'stride': 1}]
|
AugCNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ur/curzudn4ai4j7lgrmbqwy57jpcw3gylwk4nkg6jt7lqh577w5ku7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
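# The convolution itself is dispatched to extern_kernels.convolution below; the
# kernel above only adds the per-output-channel bias in place. Eager sketch
# (illustrative assumption, helper name made up):
def add_channel_bias_reference(out, bias):
    # out: (N, C, H, W) convolution result, bias: (C,)
    return out + bias.view(1, -1, 1, 1)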
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 49152, grid=grid(49152), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
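# A minimal sketch (not part of the original file) of the TF-style "SAME" padding
# arithmetic used by _compute_padding above, for a single spatial dimension; the
# helper name `tf_same_padding` is an assumption:
def tf_same_padding(input_size, filter_size, stride, dilation=1):
    effective = (filter_size - 1) * dilation + 1
    out_size = (input_size + stride - 1) // stride
    total = max(0, (out_size - 1) * stride + effective - input_size)
    # The symmetric part goes to F.conv2d's padding argument; the extra odd pixel
    # is applied with F.pad on the right/bottom before the convolution.
    return total // 2, int(total % 2 != 0)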
class AugCNN(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNN, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, obs):
return self.aug(obs)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class AugCNNNew(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNNNew, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, input_0):
primals_2 = self.aug.weight
primals_3 = self.aug.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| agarwl/auto-drac | AugCNN | false | 3,034 | [
"MIT"
] | 0 | d86c480b51929e6e4ec0ae1adba84d9f78e91705 | https://github.com/agarwl/auto-drac/tree/d86c480b51929e6e4ec0ae1adba84d9f78e91705 | import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class Model(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super().__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, obs):
return self.aug(obs)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return []
|
GRULRCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/3l/c3lvfnzdm4j54qmpqlq4wtibe7wlwzukm4xfgt3bw5bdnfre2jng.py
# Topologically Sorted Source Nodes: [pre_comp1, add_2, r, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.sigmoid_backward]
# Source node to ATen node mapping:
# add_2 => add_2
# mul => mul
# pre_comp1 => add
# r => sigmoid
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_7), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_8), kwargs = {})
# %sigmoid : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_6), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub_3), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp5
tmp10 = tmp5 * tmp9
tl.store(out_ptr0 + (x2), tmp7, xmask)
tl.store(out_ptr1 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/lz/clzjpvamptc4sf4uhsz3jl6umhb36eb2okmo444rgajkmwalgs3q.py
# Topologically Sorted Source Nodes: [pre_comp2, add_3, z, pre_comp3, add_5, c, mul_1, sub, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add_3 => add_3
# add_5 => add_5
# c => tanh
# mul_1 => mul_1
# mul_2 => mul_2
# new_h => add_6
# pre_comp2 => add_1
# pre_comp3 => add_4
# sub => sub
# z => sigmoid_1
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %view_9), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %primals_9), kwargs = {})
# %sigmoid_1 : [num_users=3] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_3,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_11), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %primals_11), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_5,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %primals_6), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %tanh), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + (x2), xmask)
tmp7 = tl.load(in_ptr2 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp5
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tl.store(in_out_ptr0 + (x2), tmp5, xmask)
tl.store(in_out_ptr1 + (x2), tmp11, xmask)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp3], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [uComp1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_5, out=buf3)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [uComp2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), primals_7, out=buf4)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pre_comp1, add_2, r, mul], Original ATen: [aten.add, aten.sigmoid, aten.mul, aten.sigmoid_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0.run(buf0, buf3, primals_8, primals_6, buf6, buf10, 256, grid=grid(256), stream=stream0)
del primals_8
buf7 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0), primals_10, out=buf7)
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [pre_comp2, add_3, z, pre_comp3, add_5, c, mul_1, sub, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub]
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1.run(buf5, buf8, buf4, primals_9, buf7, primals_11, primals_6, buf9, 256, grid=grid(256), stream=stream0)
del buf4
del buf7
del primals_11
del primals_9
return (buf9, primals_6, buf5, buf8, reinterpret_tensor(buf6, (4, 64), (1, 4), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), buf10, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Function to estimate the model size in bytes (assuming 4-byte floats)
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class GRULRCell(RNNCell):
"""
GRU LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 4 matrices if not None else creates 3 matrices)
uRank = rank of U matrix
(creates 4 matrices if not None else creates 3 matrices)
    The GRU architecture and compression techniques are described in the
    GRU(LINK) paper
Basic architecture is like:
r_t = gate_nl(W1x_t + U1h_{t-1} + B_r)
z_t = gate_nl(W2x_t + U2h_{t-1} + B_g)
h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can be further parameterised into low-rank versions by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='GRULR'):
super(GRULRCell, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_r = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'GRULR'
def forward(self, input, state):
if self._wRank is None:
wComp1 = torch.matmul(input, self.W1)
wComp2 = torch.matmul(input, self.W2)
wComp3 = torch.matmul(input, self.W3)
else:
wComp1 = torch.matmul(torch.matmul(input, self.W), self.W1)
wComp2 = torch.matmul(torch.matmul(input, self.W), self.W2)
wComp3 = torch.matmul(torch.matmul(input, self.W), self.W3)
if self._uRank is None:
uComp1 = torch.matmul(state, self.U1)
uComp2 = torch.matmul(state, self.U2)
else:
uComp1 = torch.matmul(torch.matmul(state, self.U), self.U1)
uComp2 = torch.matmul(torch.matmul(state, self.U), self.U2)
pre_comp1 = wComp1 + uComp1
pre_comp2 = wComp2 + uComp2
r = gen_nonlinearity(pre_comp1 + self.bias_r, self._gate_nonlinearity)
z = gen_nonlinearity(pre_comp2 + self.bias_gate, self.
_gate_nonlinearity)
if self._uRank is None:
pre_comp3 = wComp3 + torch.matmul(r * state, self.U3)
else:
pre_comp3 = wComp3 + torch.matmul(torch.matmul(r * state, self.
U), self.U3)
c = gen_nonlinearity(pre_comp3 + self.bias_update, self.
_update_nonlinearity)
new_h = z * state + (1.0 - z) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 3:
Vars.extend([self.W1, self.W2, self.W3])
else:
Vars.extend([self.W, self.W1, self.W2, self.W3])
if self._num_U_matrices == 3:
Vars.extend([self.U1, self.U2, self.U3])
else:
Vars.extend([self.U, self.U1, self.U2, self.U3])
Vars.extend([self.bias_r, self.bias_gate, self.bias_update])
return Vars
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
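# Minimal usage sketch (added for illustration; the shapes are assumptions
# taken from get_inputs()/get_init_inputs() above, not part of the original
# module): build a full-rank GRULRCell and run one recurrent step.
def _example_grulr_step():
    cell = GRULRCell(input_size=4, hidden_size=4)
    x, h = torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])
    new_h = cell(x, h)  # h_t = z_t * h_{t-1} + (1 - z_t) * h_t^
    return new_h.shape  # torch.Size([4, 4, 4, 4])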
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp8 = 1.0
tmp9 = tmp8 - tmp5
tmp10 = tmp5 * tmp9
tl.store(out_ptr0 + x2, tmp7, xmask)
tl.store(out_ptr1 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_1(in_out_ptr0, in_out_ptr1,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_out_ptr1 + x2, xmask)
tmp7 = tl.load(in_ptr2 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp11 = libdevice.tanh(tmp10)
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp14 - tmp5
tmp16 = tmp15 * tmp11
tmp17 = tmp13 + tmp16
tl.store(in_out_ptr0 + x2, tmp5, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (1, 4), (4, 1))
assert_size_stride(primals_9, (1, 4), (4, 1))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_4, out=buf2)
del primals_4
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
primals_5, out=buf3)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
primals_7, out=buf4)
del primals_7
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sigmoid_backward_0[grid(256)](buf0,
buf3, primals_8, primals_6, buf6, buf10, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_8
buf7 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf6, (64, 4), (4, 1), 0),
primals_10, out=buf7)
buf5 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
buf8 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf9 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
triton_poi_fused_add_mul_rsub_sigmoid_tanh_1[grid(256)](buf5, buf8,
buf4, primals_9, buf7, primals_11, primals_6, buf9, 256, XBLOCK
=128, num_warps=4, num_stages=1)
del buf4
del buf7
del primals_11
del primals_9
return buf9, primals_6, buf5, buf8, reinterpret_tensor(buf6, (4, 64), (
1, 4), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0
), buf10, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0)
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Function to estimate the model size in bytes (assuming 4-byte floats)
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class GRULRCellNew(RNNCell):
"""
GRU LR Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix
(creates 4 matrices if not None else creates 3 matrices)
uRank = rank of U matrix
(creates 4 matrices if not None else creates 3 matrices)
    The GRU architecture and compression techniques are described in the
    GRU(LINK) paper
Basic architecture is like:
r_t = gate_nl(W1x_t + U1h_{t-1} + B_r)
z_t = gate_nl(W2x_t + U2h_{t-1} + B_g)
h_t^ = update_nl(W3x_t + r_t*U3(h_{t-1}) + B_h)
h_t = z_t*h_{t-1} + (1-z_t)*h_t^
    Wi and Ui can be further parameterised into low-rank versions by
    Wi = matmul(W, W_i) and Ui = matmul(U, U_i)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, name='GRULR'):
super(GRULRCellNew, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 3, 3, 3, wRank, uRank,
wSparsity, uSparsity)
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W2 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
self.W3 = nn.Parameter(0.1 * torch.randn([input_size, hidden_size])
)
else:
self.W = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W1 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
self.W3 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
self.U3 = nn.Parameter(0.1 * torch.randn([hidden_size,
hidden_size]))
else:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U1 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.U3 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_r = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self._device = self.bias_update.device
@property
def name(self):
return self._name
@property
def cellType(self):
return 'GRULR'
def getVars(self):
Vars = []
if self._num_W_matrices == 3:
Vars.extend([self.W1, self.W2, self.W3])
else:
Vars.extend([self.W, self.W1, self.W2, self.W3])
if self._num_U_matrices == 3:
Vars.extend([self.U1, self.U2, self.U3])
else:
Vars.extend([self.U, self.U1, self.U2, self.U3])
Vars.extend([self.bias_r, self.bias_gate, self.bias_update])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W1
primals_3 = self.W2
primals_4 = self.W3
primals_5 = self.U1
primals_7 = self.U2
primals_10 = self.U3
primals_8 = self.bias_r
primals_9 = self.bias_gate
primals_11 = self.bias_update
primals_2 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
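# Hedged sanity-check sketch (an illustrative addition, not compiler output):
# the fused call() above is expected to reproduce the eager GRU update
# new_h = z * state + (1 - z) * c for the default sigmoid/tanh gates.
def _example_grulr_new_step():
    cell = GRULRCellNew(input_size=4, hidden_size=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    h = torch.rand([4, 4, 4, 4], device='cuda')
    return cell(x, h).shape  # expected torch.Size([4, 4, 4, 4])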
| adityakusupati/EdgeML | GRULRCell | false | 3,035 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf | import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super().__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Function to estimate the model size in bytes (assuming 4-byte floats)
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
# ... truncated (>4000 chars) for memory efficiency |
RGBDiff | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/fl/cfllfuxu6opan6gqlsakp4ldgmsmzrsbicbfnt43sgrqiliz3dwx.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sub, %sub_1, %sub_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 3
x0 = xindex % 16
x2 = (xindex // 48)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tmp20 = tl.full([1], 3, tl.int64)
tmp21 = tmp0 < tmp20
tmp22 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 - tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp13, tmp18, tmp26)
tmp28 = tl.where(tmp4, tmp9, tmp27)
tl.store(out_ptr0 + (x3), tmp28, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
class RGBDiff(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, image):
"""
Args:
image (torch.Tensor): (N x T x C x H x W)
"""
diffs = []
for i in range(1, image.size(self.dim)):
prev = image.index_select(self.dim, image.new_tensor(i - 1,
dtype=torch.long))
current = image.index_select(self.dim, image.new_tensor(i,
dtype=torch.long))
diffs.append(current - prev)
return torch.cat(diffs, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
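# Equivalence sketch (an added illustration, not from the original source):
# the index_select loop above amounts to a shifted-slice difference along
# self.dim, which the helper below expresses directly with narrow().
def _rgbdiff_reference(image, dim=1):
    current = image.narrow(dim, 1, image.size(dim) - 1)
    prev = image.narrow(dim, 0, image.size(dim) - 1)
    return current - prev  # matches RGBDiff(dim)(image)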
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 3
x0 = xindex % 16
x2 = xindex // 48
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 2, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tmp10 & tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
tmp18 = tl.where(tmp13, tmp16, tmp17)
tmp19 = tmp0 >= tmp11
tl.full([1], 3, tl.int64)
tmp22 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp23 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp24 = tmp22 - tmp23
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp19, tmp24, tmp25)
tmp27 = tl.where(tmp13, tmp18, tmp26)
tmp28 = tl.where(tmp4, tmp9, tmp27)
tl.store(out_ptr0 + x3, tmp28, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
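# The fused cat kernel above computes, in one elementwise pass, the three
# frame differences image[:, i] - image[:, i - 1] for i in {1, 2, 3}: x1
# selects the output slot and the 16/32/48 offsets pick consecutive input
# slices, so no intermediate difference tensors are materialised.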
class RGBDiffNew(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| JinYAnGHe/openvino_training_extensions | RGBDiff | false | 3,036 | [
"Apache-2.0"
] | 0 | a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | https://github.com/JinYAnGHe/openvino_training_extensions/tree/a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee | import torch
from torch import nn
from torchvision import models as models
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torch.onnx
class Model(nn.Module):
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, image):
"""
Args:
image (torch.Tensor): (N x T x C x H x W)
"""
diffs = []
for i in range(1, image.size(self.dim)):
prev = image.index_select(self.dim, image.new_tensor(i - 1,
dtype=torch.long))
current = image.index_select(self.dim, image.new_tensor(i,
dtype=torch.long))
diffs.append(current - prev)
return torch.cat(diffs, dim=self.dim)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
BiaffineScorer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/am/cambydvhijs7vhod3hwt5aje7cqigviqetkgm6iccyrgbwhmprcb.py
# Topologically Sorted Source Nodes: [input1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# input1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %full_default], 3), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.add]
# Source node to ATen node mapping:
# bilinear => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_4), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [input1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 320, grid=grid(320), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [input2], Original ATen: [aten.cat]
triton_poi_fused_cat_0.run(primals_2, buf1, 320, grid=grid(320), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten._trilinear]
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5), (5, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [bilinear], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf4, primals_4, 256, grid=grid(256), stream=stream0)
del primals_4
return (buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0), reinterpret_tensor(buf1, (64, 5), (5, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 5, 5), (25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BiaffineScorer(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input1, input2):
input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
], len(input1.size()) - 1)
input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
], len(input2.size()) - 1)
return self.W_bilin(input1, input2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input1_size': 4, 'input2_size': 4, 'output_size': 4}]
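# Reference sketch (an illustrative assumption, not part of the original
# module): with the bias column of ones appended, the scorer is the bilinear
# form out[..., o] = sum_ij in1[..., i] * W[o, i, j] * in2[..., j] + b[o],
# which the einsum below spells out explicitly.
def _biaffine_reference(scorer, input1, input2):
    in1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)], dim=-1)
    in2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)], dim=-1)
    w, b = scorer.W_bilin.weight, scorer.W_bilin.bias
    return torch.einsum('...i,oij,...j->...o', in1, w, in2) + b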
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = 1.0
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp6, tmp9, tmp10)
tmp12 = tl.where(tmp4, tmp5, tmp11)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 5, 5), (25, 5, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(320)](primals_1, buf0, 320, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 5), (80, 20, 5, 1), torch.float32)
triton_poi_fused_cat_0[grid(320)](primals_2, buf1, 320, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_2
buf2 = torch.ops.aten._trilinear.default(reinterpret_tensor(buf0, (
64, 5), (5, 1), 0), primals_3, reinterpret_tensor(buf1, (64, 5),
(5, 1), 0), [1, 3], [0], [1, 2], [2, 3])
del primals_3
buf3 = buf2
del buf2
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
triton_poi_fused_add_1[grid(256)](buf4, primals_4, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_4
return buf4, reinterpret_tensor(buf0, (64, 5), (5, 1), 0
), reinterpret_tensor(buf1, (64, 5), (5, 1), 0)
class BiaffineScorerNew(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input_0, input_1):
primals_3 = self.W_bilin.weight
primals_4 = self.W_bilin.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| WEYAI/PhoNLP | BiaffineScorer | false | 3,037 | [
"Apache-2.0"
] | 0 | 8fefe49965dc6346c224a5636d9333a7ddf55a2c | https://github.com/WEYAI/PhoNLP/tree/8fefe49965dc6346c224a5636d9333a7ddf55a2c | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, input1_size, input2_size, output_size):
super().__init__()
self.W_bilin = nn.Bilinear(input1_size + 1, input2_size + 1,
output_size)
self.W_bilin.weight.data.zero_()
self.W_bilin.bias.data.zero_()
def forward(self, input1, input2):
input1 = torch.cat([input1, input1.new_ones(*input1.size()[:-1], 1)
], len(input1.size()) - 1)
input2 = torch.cat([input2, input2.new_ones(*input2.size()[:-1], 1)
], len(input2.size()) - 1)
return self.W_bilin(input1, input2)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
DHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jo/cjoofcjehyfidfaq6nxxxexokj7v7ef2gisvoxogj4junffjxqhc.py
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv2d => convolution
# output => sigmoid
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 14884
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_2, (1, ), (1, ))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 61, 61), (3721, 3721, 61, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, output], Original ATen: [aten.convolution, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_0.run(buf1, primals_2, 14884, grid=grid(14884), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 256, 64, 64), (1048576, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from math import *
class DHead(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(256, 1, 4)
def forward(self, x):
output = torch.sigmoid(self.conv(x))
return output
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return [[], {}]
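# Hedged usage sketch (editorial addition, not part of the original module):
# runs the DHead defined above on the random input produced by get_inputs().
def _dhead_demo():
    head = DHead()
    x, = get_inputs()
    # 64x64 input, 4x4 kernel, no padding -> a (4, 1, 61, 61) map of sigmoid outputs
    return head(x)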
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from math import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_sigmoid_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 14884
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp3 = tmp0 + tmp2
tmp4 = tl.sigmoid(tmp3)
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (1, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_2, (1,), (1,))
assert_size_stride(primals_3, (4, 256, 64, 64), (1048576, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 1, 61, 61), (3721, 3721, 61, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_sigmoid_0[grid(14884)](buf1, primals_2,
14884, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3, buf1
class DHeadNew(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(256, 1, 4)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| a8252525/NIID-Bench | DHead | false | 3,038 | [
"MIT"
] | 0 | 33df8d3a7b941884eec3c7bd52adb8a9476eb282 | https://github.com/a8252525/NIID-Bench/tree/33df8d3a7b941884eec3c7bd52adb8a9476eb282 | import torch
import torch.nn as nn
from math import *
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(256, 1, 4)
def forward(self, x):
output = torch.sigmoid(self.conv(x))
return output
def get_inputs():
return [torch.rand([4, 256, 64, 64])]
def get_init_inputs():
return []
|
FeatureExtractionBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/rm/crmfe66pk7yrrmxujjf347i7knjkuxqdl4al5r7rzszjgyqvbzyx.py
# Topologically Sorted Source Nodes: [x_pad, x_relu], Original ATen: [aten.constant_pad_nd, aten.relu]
# Source node to ATen node mapping:
# x_pad => constant_pad_nd
# x_relu => relu
# Graph fragment:
# %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%constant_pad_nd,), kwargs = {})
triton_poi_fused_constant_pad_nd_relu_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x2 = (xindex // 36)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x4), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/e6/ce6obqynchxhxolamqlbipmwc6fxzfi24eqt2ynzowrgkrkzg4nv.py
# Topologically Sorted Source Nodes: [x_out, x_relu_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_out => convolution
# x_relu_3 => relu_3
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/kn/cknv3ppkmiow4nc3max3akdwyy3mx37cmh36ja4qiss5bnpfl5pr.py
# Topologically Sorted Source Nodes: [x_out, x_out_1, add, x_out_3, add_1, x_out_5, add_2], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# x_out => convolution
# x_out_1 => convolution_1
# x_out_3 => convolution_3
# x_out_5 => convolution_5
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_1), kwargs = {})
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convolution_3), kwargs = {})
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_8, %primals_9, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %convolution_5), kwargs = {})
triton_poi_fused_add_convolution_2 = async_compile.triton('triton_poi_fused_add_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), None)
tmp4 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x3), None)
tmp8 = tl.load(in_ptr4 + (x1), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr5 + (x3), None)
tmp12 = tl.load(in_ptr6 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tl.store(in_out_ptr0 + (x3), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_pad, x_relu], Original ATen: [aten.constant_pad_nd, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_relu_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
# Topologically Sorted Source Nodes: [x_out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_out, x_relu_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf1, primals_3, buf3, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [x_out_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_out_1, x_relu_5], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_5, buf5, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [x_out_5], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 16, 4, 1))
buf7 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_out, x_out_1, add, x_out_3, add_1, x_out_5, add_2], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_2.run(buf7, primals_3, buf2, primals_5, buf4, primals_7, buf6, primals_9, 4096, grid=grid(4096), stream=stream0)
del buf2
del buf4
del buf6
del primals_3
del primals_5
del primals_7
del primals_9
return (buf7, primals_2, primals_4, primals_6, primals_8, buf0, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 1, 1), (64, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super(Conv2dSame, self).__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
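# Hedged worked example (editorial addition, not part of the original module):
# reproduces the "same"-padding arithmetic from Conv2dSame.forward for a single
# spatial dimension. For the 4x4 inputs used below with kernel_size=3, stride=1
# and dilation=1 it returns 2, i.e. one zero pixel on each side.
def _same_pad_demo(H=4, F=3, S=1, D=1):
    H2 = math.ceil(H / S)
    return (H2 - 1) * S + (F - 1) * D + 1 - H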
class FeatureExtractionBlock(nn.Module):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d,
use_dropout=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
super(FeatureExtractionBlock, self).__init__()
self.C3R1 = Conv2dSame(input_nc, ngf, kernel_size=3)
self.C1R1 = Conv2dSame(ngf, ngf, kernel_size=1)
self.C3R2 = Conv2dSame(input_nc, ngf, kernel_size=3)
self.C1R2 = Conv2dSame(ngf, ngf, kernel_size=1)
def forward(self, input):
return self.C3R1(input) + self.C3R2(input) + self.C1R1(self.C3R1(input)
) + self.C1R2(self.C3R2(input))
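# Hedged equivalent form (editorial addition, not part of the original module):
# forward() above evaluates each 3x3 branch twice; this helper computes the same
# sum while reusing the intermediates, which mirrors what the compiled call()
# above does when it feeds buf1/buf2 both into the final add and the 1x1 branches.
def _feature_extraction_forward_reusing_intermediates(block, x):
    y1 = block.C3R1(x)
    y2 = block.C3R2(x)
    return y1 + y2 + block.C1R1(y1) + block.C1R2(y2)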
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import math
import torch.utils.data
import torch
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_relu_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x4, tmp13, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_2(in_out_ptr0, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, None)
tmp4 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x3, None)
tmp8 = tl.load(in_ptr4 + x1, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr5 + x3, None)
tmp12 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tl.store(in_out_ptr0 + x3, tmp14, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 1, 1), (64, 1, 1, 1))
assert_size_stride(primals_9, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_relu_0[grid(576)](primals_1, buf0,
576, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 64, 4, 4), (1024, 16, 4, 1))
buf2 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_relu_1[grid(4096)](buf1, primals_3,
buf3, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_relu_1[grid(4096)](buf2, primals_5,
buf5, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 4, 4), (1024, 16, 4, 1))
buf7 = buf1
del buf1
triton_poi_fused_add_convolution_2[grid(4096)](buf7, primals_3,
buf2, primals_5, buf4, primals_7, buf6, primals_9, 4096, XBLOCK
=256, num_warps=4, num_stages=1)
del buf2
del buf4
del buf6
del primals_3
del primals_5
del primals_7
del primals_9
return buf7, primals_2, primals_4, primals_6, primals_8, buf0, buf3, buf5
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super(Conv2dSame, self).__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
class FeatureExtractionBlockNew(nn.Module):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d,
use_dropout=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
super(FeatureExtractionBlockNew, self).__init__()
self.C3R1 = Conv2dSame(input_nc, ngf, kernel_size=3)
self.C1R1 = Conv2dSame(ngf, ngf, kernel_size=1)
self.C3R2 = Conv2dSame(input_nc, ngf, kernel_size=3)
self.C1R2 = Conv2dSame(ngf, ngf, kernel_size=1)
def forward(self, input_0):
primals_2 = self.C3R1.layer.weight
primals_3 = self.C3R1.layer.bias
primals_6 = self.C1R1.layer.weight
primals_5 = self.C1R1.layer.bias
primals_4 = self.C3R2.layer.weight
primals_7 = self.C3R2.layer.bias
primals_8 = self.C1R2.layer.weight
primals_9 = self.C1R2.layer.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| adityamehta00/HIDeGAN | FeatureExtractionBlock | false | 3,039 | [
"BSD-3-Clause"
] | 0 | 91a0674e092ccde2784a82bf927dfefd8673eb4c | https://github.com/adityamehta00/HIDeGAN/tree/91a0674e092ccde2784a82bf927dfefd8673eb4c | import math
import torch
import torch.utils.data
import torch
import torch.nn as nn
class Conv2dSame(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1):
super().__init__()
self.F = kernel_size
self.S = stride
self.D = dilation
self.layer = nn.Conv2d(in_channels, out_channels, kernel_size,
stride, dilation=dilation)
def forward(self, x_in):
_N, _C, H, W = x_in.shape
H2 = math.ceil(H / self.S)
W2 = math.ceil(W / self.S)
Pr = (H2 - 1) * self.S + (self.F - 1) * self.D + 1 - H
Pc = (W2 - 1) * self.S + (self.F - 1) * self.D + 1 - W
x_pad = nn.ZeroPad2d((Pr // 2, Pr - Pr // 2, Pc // 2, Pc - Pc // 2))(
x_in)
x_relu = nn.ReLU()(x_pad)
x_out = self.layer(x_relu)
return x_out
class Model(nn.Module):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
def __init__(self, input_nc, ngf=64, norm_layer=nn.BatchNorm2d,
use_dropout=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
super().__init__()
self.C3R1 = Conv2dSame(input_nc, ngf, kernel_size=3)
self.C1R1 = Conv2dSame(ngf, ngf, kernel_size=1)
self.C3R2 = Conv2dSame(input_nc, ngf, kernel_size=3)
self.C1R2 = Conv2dSame(ngf, ngf, kernel_size=1)
def forward(self, input):
return self.C3R1(input) + self.C3R2(input) + self.C1R1(self.C3R1(input)
) + self.C1R2(self.C3R2(input))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
FastGRNNCell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/w4/cw4ae3o32nppo7llvm7ul7p5le255pvoa5wqcrceacbu2xvt2ydy.py
# Topologically Sorted Source Nodes: [pre_comp, add_1, z, add_2, c, mul, sigmoid_1, sub, mul_1, sigmoid_2, add_3, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# c => tanh
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# new_h => add_4
# pre_comp => add
# sigmoid_1 => sigmoid_1
# sigmoid_2 => sigmoid_2
# sub => sub
# z => sigmoid
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %view_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_5), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %primals_6), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %primals_4), kwargs = {})
# %sigmoid_1 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_7,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %sigmoid), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid_1, %sub), kwargs = {})
# %sigmoid_2 : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%primals_8,), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %sigmoid_2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %tanh), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {})
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_rsub_sigmoid_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + (x0), xmask)
tmp8 = tl.load(in_ptr3 + (0))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp14 = tl.load(in_ptr4 + (0))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp18 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp10 = tl.sigmoid(tmp9)
tmp11 = 1.0
tmp12 = tmp11 - tmp5
tmp13 = tmp10 * tmp12
tmp16 = tl.sigmoid(tmp15)
tmp17 = tmp13 + tmp16
tmp19 = tmp2 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp21 = tmp17 * tmp20
tmp22 = tmp7 + tmp21
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr0 + (x0), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, 1), (1, 1))
assert_size_stride(primals_8, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [wComp], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [uComp], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pre_comp, add_1, z, add_2, c, mul, sigmoid_1, sub, mul_1, sigmoid_2, add_3, mul_2, new_h], Original ATen: [aten.add, aten.sigmoid, aten.tanh, aten.mul, aten.rsub]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0.run(buf2, buf1, primals_5, primals_4, primals_7, primals_8, primals_6, buf3, 256, grid=grid(256), stream=stream0)
del buf1
return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8, buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
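# Hedged example (editorial addition, not part of the original module):
# quantTanh clips to [-1, 1] and quantSigm maps (A + 1) / 2 clamped to [0, 1];
# both act as piecewise-linear stand-ins for tanh / sigmoid.
def _quant_nonlinearity_demo():
    a = torch.tensor([-3.0, -0.5, 0.0, 0.5, 3.0])
    return gen_nonlinearity(a, 'quantTanh'), gen_nonlinearity(a, 'quantSigm')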
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class FastGRNNCell(RNNCell):
"""
FastGRNN Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix (creates two matrices if not None)
uRank = rank of U matrix (creates two matrices if not None)
wSparsity = intended sparsity of W matrix(ces)
uSparsity = intended sparsity of U matrix(ces)
Warning:
The Cell will not automatically sparsify.
The user must invoke .sparsify to hard threshold.
zetaInit = init for zeta, the scale param
nuInit = init for nu, the translation param
FastGRNN architecture and compression techniques are found in
FastGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(Wx_t + Uh_{t-1} + B_g)
h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^
    W and U can further be parameterised into a low-rank version by
W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'):
super(FastGRNNCell, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank,
wSparsity, uSparsity)
self._zetaInit = zetaInit
self._nuInit = nuInit
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
else:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])
)
else:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1]))
self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1]))
self.copy_previous_UW()
@property
def name(self):
return self._name
@property
def cellType(self):
return 'FastGRNN'
def forward(self, input, state):
if self._wRank is None:
wComp = torch.matmul(input, self.W)
else:
wComp = torch.matmul(torch.matmul(input, self.W1), self.W2)
if self._uRank is None:
uComp = torch.matmul(state, self.U)
else:
uComp = torch.matmul(torch.matmul(state, self.U1), self.U2)
pre_comp = wComp + uComp
z = gen_nonlinearity(pre_comp + self.bias_gate, self._gate_nonlinearity
)
c = gen_nonlinearity(pre_comp + self.bias_update, self.
_update_nonlinearity)
new_h = z * state + (torch.sigmoid(self.zeta) * (1.0 - z) + torch.
sigmoid(self.nu)) * c
return new_h
def getVars(self):
Vars = []
if self._num_W_matrices == 1:
Vars.append(self.W)
else:
Vars.extend([self.W1, self.W2])
if self._num_U_matrices == 1:
Vars.append(self.U)
else:
Vars.extend([self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
Vars.extend([self.zeta, self.nu])
return Vars
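# Hedged usage sketch (editorial addition, not part of the original module):
# one FastGRNN update h_t = z*h_{t-1} + (sigmoid(zeta)*(1 - z) + sigmoid(nu))*c,
# as described in the class docstring, with the default full-rank W and U.
def _fastgrnn_step_demo():
    cell = FastGRNNCell(input_size=4, hidden_size=4)
    x = torch.rand(2, 4)
    h = torch.zeros(2, 4)
    return cell(x, h)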
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.onnx
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_mul_rsub_sigmoid_tanh_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
x1 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr2 + x0, xmask)
tmp8 = tl.load(in_ptr3 + 0)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp14 = tl.load(in_ptr4 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp18 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = tl.sigmoid(tmp4)
tmp7 = tmp5 * tmp6
tmp10 = tl.sigmoid(tmp9)
tmp11 = 1.0
tmp12 = tmp11 - tmp5
tmp13 = tmp10 * tmp12
tmp16 = tl.sigmoid(tmp15)
tmp17 = tmp13 + tmp16
tmp19 = tmp2 + tmp18
tmp20 = libdevice.tanh(tmp19)
tmp21 = tmp17 * tmp20
tmp22 = tmp7 + tmp21
tl.store(in_out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr0 + x0, tmp22, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (1, 4), (4, 1))
assert_size_stride(primals_6, (1, 4), (4, 1))
assert_size_stride(primals_7, (1, 1), (1, 1))
assert_size_stride(primals_8, (1, 1), (1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_4, (64, 4), (4, 1), 0),
primals_3, out=buf1)
del primals_3
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_rsub_sigmoid_tanh_0[grid(256)](buf2, buf1,
primals_5, primals_4, primals_7, primals_8, primals_6, buf3,
256, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
return (buf3, primals_4, primals_5, primals_6, primals_7, primals_8,
buf2, reinterpret_tensor(primals_2, (4, 64), (1, 4), 0))
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super(RNNCell, self).__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
Function to get aimed model size
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
self.oldmats[i] = torch.FloatTensor(mats[i].detach().clone())
def sparsify(self):
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
for i in range(0, endW):
mats[i] = utils.hardThreshold(mats[i], self._wSparsity)
for i in range(endW, endU):
mats[i] = utils.hardThreshold(mats[i], self._uSparsity)
self.copy_previous_UW()
def sparsifyWithSupport(self):
mats = self.getVars()
endU = self._num_W_matrices + self._num_U_matrices
for i in range(0, endU):
mats[i] = utils.supportBasedThreshold(mats[i], self.oldmats[i])
class FastGRNNCellNew(RNNCell):
"""
FastGRNN Cell with Both Full Rank and Low Rank Formulations
Has multiple activation functions for the gates
hidden_size = # hidden units
gate_nonlinearity = nonlinearity for the gate can be chosen from
[tanh, sigmoid, relu, quantTanh, quantSigm]
update_nonlinearity = nonlinearity for final rnn update
can be chosen from [tanh, sigmoid, relu, quantTanh, quantSigm]
wRank = rank of W matrix (creates two matrices if not None)
uRank = rank of U matrix (creates two matrices if not None)
wSparsity = intended sparsity of W matrix(ces)
uSparsity = intended sparsity of U matrix(ces)
Warning:
The Cell will not automatically sparsify.
The user must invoke .sparsify to hard threshold.
zetaInit = init for zeta, the scale param
nuInit = init for nu, the translation param
FastGRNN architecture and compression techniques are found in
FastGRNN(LINK) paper
Basic architecture is like:
z_t = gate_nl(Wx_t + Uh_{t-1} + B_g)
h_t^ = update_nl(Wx_t + Uh_{t-1} + B_h)
h_t = z_t*h_{t-1} + (sigmoid(zeta)(1-z_t) + sigmoid(nu))*h_t^
    W and U can further be parameterised into a low-rank version by
W = matmul(W_1, W_2) and U = matmul(U_1, U_2)
"""
def __init__(self, input_size, hidden_size, gate_nonlinearity='sigmoid',
update_nonlinearity='tanh', wRank=None, uRank=None, wSparsity=1.0,
uSparsity=1.0, zetaInit=1.0, nuInit=-4.0, name='FastGRNN'):
super(FastGRNNCellNew, self).__init__(input_size, hidden_size,
gate_nonlinearity, update_nonlinearity, 1, 1, 2, wRank, uRank,
wSparsity, uSparsity)
self._zetaInit = zetaInit
self._nuInit = nuInit
if wRank is not None:
self._num_W_matrices += 1
self._num_weight_matrices[0] = self._num_W_matrices
if uRank is not None:
self._num_U_matrices += 1
self._num_weight_matrices[1] = self._num_U_matrices
self._name = name
if wRank is None:
self.W = nn.Parameter(0.1 * torch.randn([input_size, hidden_size]))
else:
self.W1 = nn.Parameter(0.1 * torch.randn([input_size, wRank]))
self.W2 = nn.Parameter(0.1 * torch.randn([wRank, hidden_size]))
if uRank is None:
self.U = nn.Parameter(0.1 * torch.randn([hidden_size, hidden_size])
)
else:
self.U1 = nn.Parameter(0.1 * torch.randn([hidden_size, uRank]))
self.U2 = nn.Parameter(0.1 * torch.randn([uRank, hidden_size]))
self.bias_gate = nn.Parameter(torch.ones([1, hidden_size]))
self.bias_update = nn.Parameter(torch.ones([1, hidden_size]))
self.zeta = nn.Parameter(self._zetaInit * torch.ones([1, 1]))
self.nu = nn.Parameter(self._nuInit * torch.ones([1, 1]))
self.copy_previous_UW()
@property
def name(self):
return self._name
@property
def cellType(self):
return 'FastGRNN'
def getVars(self):
Vars = []
if self._num_W_matrices == 1:
Vars.append(self.W)
else:
Vars.extend([self.W1, self.W2])
if self._num_U_matrices == 1:
Vars.append(self.U)
else:
Vars.extend([self.U1, self.U2])
Vars.extend([self.bias_gate, self.bias_update])
Vars.extend([self.zeta, self.nu])
return Vars
def forward(self, input_0, input_1):
primals_1 = self.W
primals_3 = self.U
primals_5 = self.bias_gate
primals_6 = self.bias_update
primals_7 = self.zeta
primals_8 = self.nu
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| adityakusupati/EdgeML | FastGRNNCell | false | 3,040 | [
"MIT"
] | 0 | 65933a6fdfc38945f4311043a62e120784b2b0bf | https://github.com/adityakusupati/EdgeML/tree/65933a6fdfc38945f4311043a62e120784b2b0bf | import torch
import torch.nn as nn
import torch.onnx
def gen_nonlinearity(A, nonlinearity):
"""
Returns required activation for a tensor based on the inputs
nonlinearity is either a callable or a value in
['tanh', 'sigmoid', 'relu', 'quantTanh', 'quantSigm', 'quantSigm4']
"""
if nonlinearity == 'tanh':
return torch.tanh(A)
elif nonlinearity == 'sigmoid':
return torch.sigmoid(A)
elif nonlinearity == 'relu':
        return torch.relu(A)
elif nonlinearity == 'quantTanh':
return torch.max(torch.min(A, torch.ones_like(A)), -1.0 * torch.
ones_like(A))
elif nonlinearity == 'quantSigm':
A = (A + 1.0) / 2.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
elif nonlinearity == 'quantSigm4':
A = (A + 2.0) / 4.0
return torch.max(torch.min(A, torch.ones_like(A)), torch.zeros_like(A))
else:
if not callable(nonlinearity):
raise ValueError(
'nonlinearity is either a callable or a value ' +
"['tanh', 'sigmoid', 'relu', 'quantTanh', " + "'quantSigm'")
return nonlinearity(A)
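# Minimal usage sketch for gen_nonlinearity (comments only; the tensor and the
# chosen options below are assumptions added for illustration):
#   a = torch.randn(2, 3)
#   gen_nonlinearity(a, 'quantSigm')    # clamps (a + 1) / 2 into [0, 1]
#   gen_nonlinearity(a, torch.sigmoid)  # any callable is applied directly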
class RNNCell(nn.Module):
def __init__(self, input_size, hidden_size, gate_nonlinearity,
update_nonlinearity, num_W_matrices, num_U_matrices, num_biases,
wRank=None, uRank=None, wSparsity=1.0, uSparsity=1.0):
super().__init__()
self._input_size = input_size
self._hidden_size = hidden_size
self._gate_nonlinearity = gate_nonlinearity
self._update_nonlinearity = update_nonlinearity
self._num_W_matrices = num_W_matrices
self._num_U_matrices = num_U_matrices
self._num_biases = num_biases
self._num_weight_matrices = [self._num_W_matrices, self.
_num_U_matrices, self._num_biases]
self._wRank = wRank
self._uRank = uRank
self._wSparsity = wSparsity
self._uSparsity = uSparsity
self.oldmats = []
@property
def state_size(self):
return self._hidden_size
@property
def input_size(self):
return self._input_size
@property
def output_size(self):
return self._hidden_size
@property
def gate_nonlinearity(self):
return self._gate_nonlinearity
@property
def update_nonlinearity(self):
return self._update_nonlinearity
@property
def wRank(self):
return self._wRank
@property
def uRank(self):
return self._uRank
@property
def num_W_matrices(self):
return self._num_W_matrices
@property
def num_U_matrices(self):
return self._num_U_matrices
@property
def num_weight_matrices(self):
return self._num_weight_matrices
@property
def name(self):
raise NotImplementedError()
def forward(self, input, state):
raise NotImplementedError()
def getVars(self):
raise NotImplementedError()
def get_model_size(self):
"""
        Estimate the model size in bytes after the intended sparsification
        (4 bytes per retained parameter).
"""
mats = self.getVars()
endW = self._num_W_matrices
endU = endW + self._num_U_matrices
totalnnz = 2
for i in range(0, endW):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._wSparsity)
mats[i]
for i in range(endW, endU):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), self._uSparsity)
mats[i]
for i in range(endU, len(mats)):
mats[i].device
totalnnz += utils.countNNZ(mats[i].cpu(), False)
mats[i]
return totalnnz * 4
def copy_previous_UW(self):
mats = self.getVars()
num_mats = self._num_W_matrices + self._num_U_matrices
if len(self.oldmats) != num_mats:
for i in range(num_mats):
self.oldmats.append(torch.FloatTensor())
for i in range(num_mats):
# ... truncated (>4000 chars) for memory efficiency |
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/6o/c6o7ainbzocsswla76yvmdsc5donraaar3dzlx2icwrueb7fc46u.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hj/chjzotk5iydxvuetxetlv36s7car7cdb24whkuqihxwcy5kkr4o2.py
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# Graph fragment:
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%view_3,), kwargs = {})
triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0); del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf4, 16384, grid=grid(16384), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [tanh], Original ATen: [aten.tanh]
triton_poi_fused_tanh_1.run(buf3, primals_5, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 256), (256, 1), 0), buf3, primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Actor(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc_units=256):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
            fc_units (int): Number of nodes in the hidden layer
"""
super(Actor, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units)
self.fc2 = nn.Linear(fc_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
        return torch.tanh(self.fc2(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
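# Hedged usage sketch (added for illustration, not part of the original module):
# builds the Actor from the helpers above and runs one forward pass.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    actor = Actor(*init_args, **init_kwargs)
    actions = actor(*get_inputs())
    print(actions.shape)  # expected: torch.Size([4, 4, 4, 4])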
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 256), (256, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0
)
del buf0
buf4 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1,
primals_2, buf4, 16384, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0),
reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_tanh_1[grid(256)](buf3, primals_5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_5
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 256), (256, 1), 0
), buf3, primals_4, buf4
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class ActorNew(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc_units=256):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
            fc_units (int): Number of nodes in the hidden layer
"""
super(ActorNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units)
self.fc2 = nn.Linear(fc_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| ahgit2021/deep-reinforcement-learning | Actor | false | 3,041 | [
"MIT"
] | 0 | 081464ba45f803663e841e1635d829aa00cce870 | https://github.com/ahgit2021/deep-reinforcement-learning/tree/081464ba45f803663e841e1635d829aa00cce870 | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Model(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size, seed, fc_units=256):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
            fc_units (int): Number of nodes in the hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fc1 = nn.Linear(state_size, fc_units)
self.fc2 = nn.Linear(fc_units, action_size)
self.reset_parameters()
def reset_parameters(self):
self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
self.fc2.weight.data.uniform_(-0.003, 0.003)
def forward(self, state):
"""Build an actor (policy) network that maps states -> actions."""
x = F.relu(self.fc1(state))
        return torch.tanh(self.fc2(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
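# Hedged usage sketch (added for illustration, not part of the original file):
# here get_init_inputs() supplies (state_size, action_size, seed) positionally.
if __name__ == '__main__':
    model = Model(*get_init_inputs())
    out = model(*get_inputs())
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])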
|
FitNet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/es/ceshv5bens4vpbp3i56i4r5e7jdsbiee6ioonjkerfwdoe5uflhr.py
# Topologically Sorted Source Nodes: [sub, pow_1, mean], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# mean => mean
# pow_1 => pow_1
# sub => sub
# Graph fragment:
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution, %primals_3), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 1.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Scalar](args = (%pow_2, 2.0), kwargs = {})
triton_per_fused_mean_mul_pow_sub_0 = async_compile.triton('triton_per_fused_mean_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 2.0
tmp8 = tmp2 * tmp7
tmp9 = 256.0
tmp10 = tmp6 / tmp9
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp8, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [sub, pow_1, mean], Original ATen: [aten.sub, aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0.run(buf3, buf0, primals_3, buf2, 1, 256, grid=grid(1), stream=stream0)
del buf0
del primals_3
return (buf3, primals_1, primals_2, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class FitNet(nn.Module):
def __init__(self, in_feature, out_feature):
super().__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.transform = nn.Conv2d(in_feature, out_feature, 1, bias=False)
self.transform.weight.data.uniform_(-0.005, 0.005)
def forward(self, student, teacher):
if student.dim() == 2:
student = student.unsqueeze(2).unsqueeze(3)
teacher = teacher.unsqueeze(2).unsqueeze(3)
return (self.transform(student) - teacher).pow(2).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4}]
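# Hedged usage sketch (added for illustration, not part of the original module):
# computes the FitNet hint loss between random student and teacher features.
if __name__ == '__main__':
    init_args, init_kwargs = get_init_inputs()
    fitnet = FitNet(*init_args, **init_kwargs)
    student, teacher = get_inputs()
    print(fitnet(student, teacher).item())  # scalar mean-squared hint loss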
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1,
out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 2.0
tmp8 = tmp2 * tmp7
tmp9 = 256.0
tmp10 = tmp6 / tmp9
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp8, None)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp10, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf3 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_mean_mul_pow_sub_0[grid(1)](buf3, buf0, primals_3,
buf2, 1, 256, num_warps=2, num_stages=1)
del buf0
del primals_3
return buf3, primals_1, primals_2, buf2
class FitNetNew(nn.Module):
def __init__(self, in_feature, out_feature):
super().__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.transform = nn.Conv2d(in_feature, out_feature, 1, bias=False)
self.transform.weight.data.uniform_(-0.005, 0.005)
def forward(self, input_0, input_1):
primals_2 = self.transform.weight
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
| ahu-hpt/AOMD | FitNet | false | 3,043 | [
"Apache-2.0"
] | 0 | 8d99dbb803feaef55fc089bfb3399d2fb21d55d8 | https://github.com/ahu-hpt/AOMD/tree/8d99dbb803feaef55fc089bfb3399d2fb21d55d8 | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, in_feature, out_feature):
super().__init__()
self.in_feature = in_feature
self.out_feature = out_feature
self.transform = nn.Conv2d(in_feature, out_feature, 1, bias=False)
self.transform.weight.data.uniform_(-0.005, 0.005)
def forward(self, student, teacher):
if student.dim() == 2:
student = student.unsqueeze(2).unsqueeze(3)
teacher = teacher.unsqueeze(2).unsqueeze(3)
return (self.transform(student) - teacher).pow(2).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
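# Hedged usage sketch (added for illustration, not part of the original file):
if __name__ == '__main__':
    model = Model(*get_init_inputs())
    print(model(*get_inputs()).item())  # scalar mean-squared hint loss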
|
AE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7z/c7zuih2ysjtir5rh5seep5ijnhokjlgkyjw2edhf257ahvz4iipr.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/j6/cj6faeofhfnxsh5iuwazughjlau4igyajnmvjequyelq7apzs4qm.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_2 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6y/c6yx6oq7oo2cwoaop3iwu5iqfdckg6lycdtu4jjuiv3wdcf2o6p7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_3 => getitem_2, getitem_3
# Graph fragment:
# %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/c3/cc37wvituo2asffgdbn2cnuhsr4nuj5pzt75pvxxxx4t7tdtdkqj.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_4 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_4 = async_compile.triton('triton_poi_fused_convolution_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ec/cecfzmb7lcmubnp7axcg34e5wnqbt6562ckr2vzbwja7yvxfo3xj.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_4, getitem_5
# Graph fragment:
# %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
# %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_5 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, None)
tl.store(out_ptr1 + (x2), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/hl/chl7flvid7e5gyonniefbfl2pnjfi5mtp3ggr6ojn2nvpq7bic6t.py
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_6 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 2048
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 8
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yv/cyvzwgrshfgduj4qeigfrsvgb6byugegf65umttyn2kk3ubrji4w.py
# Topologically Sorted Source Nodes: [enc], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# enc => getitem_6, getitem_7
# Graph fragment:
# %getitem_6 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 0), kwargs = {})
# %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_7 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x2), tmp6, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/5j/c5j5auducgrvefhuebskm2kmafzvwd26ccc3idoylmyead3sqjdn.py
# Topologically Sorted Source Nodes: [res_4], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# res_4 => convolution_8
# Graph fragment:
# %convolution_8 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_8 = async_compile.triton('triton_poi_fused_convolution_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_8', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (8, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (8, ), (1, ))
assert_size_stride(primals_10, (8, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_11, (8, ), (1, ))
assert_size_stride(primals_12, (8, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_13, (16, ), (1, ))
assert_size_stride(primals_14, (16, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_15, (32, ), (1, ))
assert_size_stride(primals_16, (32, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_17, (64, ), (1, ))
assert_size_stride(primals_18, (4, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 1048576, grid=grid(1048576), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 262144, grid=grid(262144), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf5, primals_5, 131072, grid=grid(131072), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.float32)
buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf9, primals_7, 16384, grid=grid(16384), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.float32)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_5.run(buf9, buf10, buf11, 4096, grid=grid(4096), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 8, 8, 8), (512, 64, 8, 1))
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf13, primals_9, 2048, grid=grid(2048), stream=stream0)
del primals_9
buf14 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
buf15 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.int8)
# Topologically Sorted Source Nodes: [enc], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_7.run(buf13, buf14, buf15, 512, grid=grid(512), stream=stream0)
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf16 = extern_kernels.convolution(buf14, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 8, 8, 8), (512, 64, 8, 1))
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, res], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf17, primals_11, 2048, grid=grid(2048), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf18 = extern_kernels.convolution(buf17, primals_12, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 16, 16, 16), (4096, 256, 16, 1))
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, res_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_4.run(buf19, primals_13, 16384, grid=grid(16384), stream=stream0)
del primals_13
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf20 = extern_kernels.convolution(buf19, primals_14, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf21 = buf20; del buf20 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, res_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf21, primals_15, 131072, grid=grid(131072), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv_transpose2d_3], Original ATen: [aten.convolution]
buf22 = extern_kernels.convolution(buf21, primals_16, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_3, res_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf23, primals_17, 1048576, grid=grid(1048576), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [res_4], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [res_4], Original ATen: [aten.convolution]
triton_poi_fused_convolution_8.run(buf25, primals_19, 65536, grid=grid(65536), stream=stream0)
del primals_19
return (buf14, buf25, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11, buf13, buf14, buf15, buf17, buf19, buf21, buf23, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((8, 16, 3, 3), (144, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((8, 8, 2, 2), (32, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((8, 16, 2, 2), (64, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((16, 32, 2, 2), (128, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((32, 64, 2, 2), (256, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((4, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn
class AE(nn.Module):
def __init__(self, num_channels):
super(AE, self).__init__()
self.enc1 = nn.Conv2d(num_channels, 64, kernel_size=3, padding=1)
self.enc2 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.enc3 = nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.enc4 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.dec1 = nn.ConvTranspose2d(8, 8, kernel_size=2, stride=2)
self.dec2 = nn.ConvTranspose2d(8, 16, kernel_size=2, stride=2)
self.dec3 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2)
self.dec4 = nn.ConvTranspose2d(32, 64, kernel_size=2, stride=2)
self.out = nn.Conv2d(64, num_channels, kernel_size=3, padding=1)
def forward(self, x):
x = F.relu(self.enc1(x))
x = self.pool(x)
x = F.relu(self.enc2(x))
x = self.pool(x)
x = F.relu(self.enc3(x))
x = self.pool(x)
x = F.relu(self.enc4(x))
enc = self.pool(x)
res = F.relu(self.dec1(enc))
res = F.relu(self.dec2(res))
res = F.relu(self.dec3(res))
res = F.relu(self.dec4(res))
res = self.out(res)
return enc, res
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'num_channels': 4}]
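# Hedged usage sketch (editorial, not part of the original repo): how the helpers above are
# typically consumed to build and run the autoencoder. The shapes follow from applying four
# rounds of 2x2 pooling to a 64x64 input.
def _example_run_ae():
    args, kwargs = get_init_inputs()
    model = AE(*args, **kwargs)          # AE(num_channels=4)
    enc, res = model(*get_inputs())      # input: (4, 4, 64, 64)
    return enc.shape, res.shape          # (4, 8, 4, 4) and (4, 4, 64, 64)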
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_4(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 16
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, None)
tl.store(out_ptr1 + x2, tmp16, None)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 8
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_7(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), xmask, eviction_policy
='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + x2, tmp6, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_8(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
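# Editorial note (hedged): unlike the fused convolution+ReLU kernels above, this final kernel
# only adds the per-channel bias in place, matching the eager model's last layer (self.out),
# which is applied without an activation.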
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (16, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (8, 16, 3, 3), (144, 9, 3, 1))
assert_size_stride(primals_9, (8,), (1,))
assert_size_stride(primals_10, (8, 8, 2, 2), (32, 4, 2, 1))
assert_size_stride(primals_11, (8,), (1,))
assert_size_stride(primals_12, (8, 16, 2, 2), (64, 4, 2, 1))
assert_size_stride(primals_13, (16,), (1,))
assert_size_stride(primals_14, (16, 32, 2, 2), (128, 4, 2, 1))
assert_size_stride(primals_15, (32,), (1,))
assert_size_stride(primals_16, (32, 64, 2, 2), (256, 4, 2, 1))
assert_size_stride(primals_17, (64,), (1,))
assert_size_stride(primals_18, (4, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_19, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_2,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
buf3 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(262144)](buf1, buf2,
buf3, 262144, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_2[grid(131072)](buf5, primals_5,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.float32)
buf7 = empty_strided_cuda((4, 32, 16, 16), (8192, 256, 16, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(32768)](buf5, buf6,
buf7, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 16, 16, 16), (4096, 256, 16, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_4[grid(16384)](buf9, primals_7,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.
float32)
buf11 = empty_strided_cuda((4, 16, 8, 8), (1024, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_5[grid(4096)](buf9, buf10,
buf11, 4096, XBLOCK=128, num_warps=4, num_stages=1)
buf12 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 8, 8, 8), (512, 64, 8, 1))
buf13 = buf12
del buf12
triton_poi_fused_convolution_relu_6[grid(2048)](buf13, primals_9,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_9
buf14 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32
)
buf15 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_7[grid(512)](buf13, buf14,
buf15, 512, XBLOCK=128, num_warps=4, num_stages=1)
buf16 = extern_kernels.convolution(buf14, primals_10, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf16, (4, 8, 8, 8), (512, 64, 8, 1))
buf17 = buf16
del buf16
triton_poi_fused_convolution_relu_6[grid(2048)](buf17, primals_11,
2048, XBLOCK=128, num_warps=4, num_stages=1)
del primals_11
buf18 = extern_kernels.convolution(buf17, primals_12, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf18, (4, 16, 16, 16), (4096, 256, 16, 1))
buf19 = buf18
del buf18
triton_poi_fused_convolution_relu_4[grid(16384)](buf19, primals_13,
16384, XBLOCK=256, num_warps=4, num_stages=1)
del primals_13
buf20 = extern_kernels.convolution(buf19, primals_14, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf20, (4, 32, 32, 32), (32768, 1024, 32, 1))
buf21 = buf20
del buf20
triton_poi_fused_convolution_relu_2[grid(131072)](buf21, primals_15,
131072, XBLOCK=512, num_warps=8, num_stages=1)
del primals_15
buf22 = extern_kernels.convolution(buf21, primals_16, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf22, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf23 = buf22
del buf22
triton_poi_fused_convolution_relu_0[grid(1048576)](buf23,
primals_17, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_17
buf24 = extern_kernels.convolution(buf23, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf25 = buf24
del buf24
triton_poi_fused_convolution_8[grid(65536)](buf25, primals_19,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_19
return (buf14, buf25, primals_1, primals_3, primals_4, primals_6,
primals_8, primals_10, primals_12, primals_14, primals_16,
primals_18, buf1, buf2, buf3, buf5, buf6, buf7, buf9, buf10, buf11,
buf13, buf14, buf15, buf17, buf19, buf21, buf23)
class AENew(nn.Module):
def __init__(self, num_channels):
super(AENew, self).__init__()
self.enc1 = nn.Conv2d(num_channels, 64, kernel_size=3, padding=1)
self.enc2 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.enc3 = nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.enc4 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.dec1 = nn.ConvTranspose2d(8, 8, kernel_size=2, stride=2)
self.dec2 = nn.ConvTranspose2d(8, 16, kernel_size=2, stride=2)
self.dec3 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2)
self.dec4 = nn.ConvTranspose2d(32, 64, kernel_size=2, stride=2)
self.out = nn.Conv2d(64, num_channels, kernel_size=3, padding=1)
def forward(self, input_0):
primals_1 = self.enc1.weight
primals_2 = self.enc1.bias
primals_4 = self.enc2.weight
primals_5 = self.enc2.bias
primals_6 = self.enc3.weight
primals_7 = self.enc3.bias
primals_8 = self.enc4.weight
primals_9 = self.enc4.bias
primals_10 = self.dec1.weight
primals_11 = self.dec1.bias
primals_12 = self.dec2.weight
primals_13 = self.dec2.bias
primals_14 = self.dec3.weight
primals_15 = self.dec3.bias
primals_16 = self.dec4.weight
primals_17 = self.dec4.bias
primals_18 = self.out.weight
primals_19 = self.out.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19])
return output[0], output[1]
| ahanagemini/Phd_final_year_old_sr | AE | false | 3,045 | [
"BSD-2-Clause"
] | 0 | 62be9d1294acdb724a2fe424789b657a44e2cd7d | https://github.com/ahanagemini/Phd_final_year_old_sr/tree/62be9d1294acdb724a2fe424789b657a44e2cd7d | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn
class Model(nn.Module):
def __init__(self, num_channels):
super().__init__()
self.enc1 = nn.Conv2d(num_channels, 64, kernel_size=3, padding=1)
self.enc2 = nn.Conv2d(64, 32, kernel_size=3, padding=1)
self.enc3 = nn.Conv2d(32, 16, kernel_size=3, padding=1)
self.enc4 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.dec1 = nn.ConvTranspose2d(8, 8, kernel_size=2, stride=2)
self.dec2 = nn.ConvTranspose2d(8, 16, kernel_size=2, stride=2)
self.dec3 = nn.ConvTranspose2d(16, 32, kernel_size=2, stride=2)
self.dec4 = nn.ConvTranspose2d(32, 64, kernel_size=2, stride=2)
self.out = nn.Conv2d(64, num_channels, kernel_size=3, padding=1)
def forward(self, x):
x = F.relu(self.enc1(x))
x = self.pool(x)
x = F.relu(self.enc2(x))
x = self.pool(x)
x = F.relu(self.enc3(x))
x = self.pool(x)
x = F.relu(self.enc4(x))
enc = self.pool(x)
res = F.relu(self.dec1(enc))
res = F.relu(self.dec2(res))
res = F.relu(self.dec3(res))
res = F.relu(self.dec4(res))
res = self.out(res)
return enc, res
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4]
|
Critic | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jc/cjcmonojgt5syfl4gwrg64cs7sexr7chwsyoox5gualzchbfx2uu.py
# Topologically Sorted Source Nodes: [xs], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# xs => gt
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_2), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
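# Editorial note (hedged): this kernel stores only the boolean mask (x @ W1.T + b1) > 0 for the
# first linear layer; the leaky-relu value itself is reconstructed from the same inputs inside
# the concat kernel below, so no separate activation buffer is materialized here.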
# kernel path: runs/run_shard_7/inductor_cache/sx/csxmixchkiwaaxctdsbdahgg6l6kte2lpjzox5oikoelz6hlpwyr.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1040
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 260
x1 = (xindex // 260)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((256*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + ((256*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 260, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + ((4*x1) + ((-256) + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x2), tmp18, xmask)
''', device_str='cuda')
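# Hedged editorial sketch: the kernel above fuses the leaky_relu of the first layer's
# pre-activation with concatenation of the action tensor along dim=1 (256 + 4 = 260 columns).
# A rough eager-mode equivalent, for illustration only (tensor names assumed, slope 0.01 as in
# the kernel):
def _editorial_leaky_relu_cat_sketch(xs_pre, bias, action):
    import torch
    import torch.nn.functional as F
    # xs_pre: (4, 256) matmul output before bias, bias: (256,), action: (4, 4)
    return torch.cat((F.leaky_relu(xs_pre + bias, negative_slope=0.01), action), dim=1)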
# kernel path: runs/run_shard_7/inductor_cache/u2/cu2l6dx3mmsceog6k5g5ymaniat6gqe77ap35mcbeyh7afpuin2g.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/yh/cyh4nxttmv2s545nye3lr4ggbk423yhruoum4fzb24b64iqxuimy.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.01), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %add_tensor, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 260), (260, 1))
assert_size_stride(primals_6, (256, ), (1, ))
assert_size_stride(primals_7, (128, 256), (256, 1))
assert_size_stride(primals_8, (128, ), (1, ))
assert_size_stride(primals_9, (2, 128), (128, 1))
assert_size_stride(primals_10, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
# Topologically Sorted Source Nodes: [xs], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, 1024, grid=grid(1024), stream=stream0)
buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf0, primals_2, primals_4, buf2, 1040, grid=grid(1040), stream=stream0)
del primals_2
del primals_4
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), (1, 260), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf3, primals_6, buf4, buf5, 1024, grid=grid(1024), stream=stream0)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), (1, 256), 0), out=buf6)
buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf6, primals_8, buf7, buf8, 512, grid=grid(512), stream=stream0)
del buf6
del primals_8
buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, buf8, reinterpret_tensor(primals_9, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf9)
del primals_10
return (buf9, primals_3, buf1, buf2, buf4, buf5, buf7, buf8, primals_9, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((256, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((256, 260), (260, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, 256), (256, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
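# Editorial worked example (illustrative): nn.Linear stores its weight as
# (out_features, in_features), so size()[0] here reads the layer's output width. For fcs1
# (weight shape (256, 4)) this gives lim = 1 / sqrt(256) = 0.0625, i.e. a uniform init
# range of (-0.0625, 0.0625).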
class Critic(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=256,
fc2_units=256, fc3_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
fc3_units (int): Number of nodes in the third hidden layer
"""
super(Critic, self).__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 2)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(-0.003, 0.003)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = F.leaky_relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'state_size': 4, 'action_size': 4, 'seed': 4}]
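# Hedged usage sketch (editorial, not part of the source repo): building the critic from the
# helpers above and running one forward pass.
def _example_run_critic():
    args, kwargs = get_init_inputs()
    critic = Critic(*args, **kwargs)      # Critic(state_size=4, action_size=4, seed=4)
    state, action = get_inputs()          # each of shape (4, 4)
    q = critic(state, action)
    return q.shape                        # torch.Size([4, 2]) since fc4 maps fc3_units -> 2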
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1040
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 260
x1 = xindex // 260
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (256 * x1 + x0), tmp4 & xmask, eviction_policy
='evict_last', other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (256 * x1 + x0), tmp4 & xmask, eviction_policy
='evict_last', other=0.0)
tmp7 = tl.load(in_ptr2 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.01
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 260, tl.int64)
tmp17 = tl.load(in_ptr3 + (4 * x1 + (-256 + x0)), tmp14 & xmask,
eviction_policy='evict_last', other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 256
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (256, 4), (4, 1))
assert_size_stride(primals_2, (256,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (256, 260), (260, 1))
assert_size_stride(primals_6, (256,), (1,))
assert_size_stride(primals_7, (128, 256), (256, 1))
assert_size_stride(primals_8, (128,), (1,))
assert_size_stride(primals_9, (2, 128), (128, 1))
assert_size_stride(primals_10, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 256),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(1024)](buf0, primals_2, buf1,
1024, XBLOCK=128, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4, 260), (260, 1), torch.float32)
triton_poi_fused_cat_1[grid(1040)](buf1, buf0, primals_2, primals_4,
buf2, 1040, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
buf3 = buf0
del buf0
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (260, 256), (
1, 260), 0), out=buf3)
buf4 = empty_strided_cuda((4, 256), (256, 1), torch.bool)
buf5 = empty_strided_cuda((4, 256), (256, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(1024)](buf3, primals_6, buf4,
buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1)
del buf3
del primals_6
buf6 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_7, (256, 128), (
1, 256), 0), out=buf6)
buf7 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf8 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
triton_poi_fused_leaky_relu_3[grid(512)](buf6, primals_8, buf7,
buf8, 512, XBLOCK=128, num_warps=4, num_stages=1)
del buf6
del primals_8
buf9 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_10, buf8, reinterpret_tensor(primals_9,
(128, 2), (1, 128), 0), alpha=1, beta=1, out=buf9)
del primals_10
return (buf9, primals_3, buf1, buf2, buf4, buf5, buf7, buf8, primals_9,
primals_7, primals_5)
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class CriticNew(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=256,
fc2_units=256, fc3_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
fc3_units (int): Number of nodes in the third hidden layer
"""
super(CriticNew, self).__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 2)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(-0.003, 0.003)
def forward(self, input_0, input_1):
primals_1 = self.fcs1.weight
primals_2 = self.fcs1.bias
primals_5 = self.fc2.weight
primals_6 = self.fc2.bias
primals_7 = self.fc3.weight
primals_8 = self.fc3.bias
primals_9 = self.fc4.weight
primals_10 = self.fc4.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
| ahgit2021/deep-reinforcement-learning | Critic | false | 3,046 | [
"MIT"
] | 0 | 081464ba45f803663e841e1635d829aa00cce870 | https://github.com/ahgit2021/deep-reinforcement-learning/tree/081464ba45f803663e841e1635d829aa00cce870 | import torch
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
def hidden_init(layer):
fan_in = layer.weight.data.size()[0]
lim = 1.0 / np.sqrt(fan_in)
return -lim, lim
class Model(nn.Module):
"""Critic (Value) Model."""
def __init__(self, state_size, action_size, seed, fcs1_units=256,
fc2_units=256, fc3_units=128):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fcs1_units (int): Number of nodes in the first hidden layer
fc2_units (int): Number of nodes in the second hidden layer
fc3_units (int): Number of nodes in the third hidden layer
"""
super().__init__()
self.seed = torch.manual_seed(seed)
self.fcs1 = nn.Linear(state_size, fcs1_units)
self.fc2 = nn.Linear(fcs1_units + action_size, fc2_units)
self.fc3 = nn.Linear(fc2_units, fc3_units)
self.fc4 = nn.Linear(fc3_units, 2)
self.reset_parameters()
def reset_parameters(self):
self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
self.fc3.weight.data.uniform_(*hidden_init(self.fc3))
self.fc4.weight.data.uniform_(-0.003, 0.003)
def forward(self, state, action):
"""Build a critic (value) network that maps (state, action) pairs -> Q-values."""
xs = F.leaky_relu(self.fcs1(state))
x = torch.cat((xs, action), dim=1)
x = F.leaky_relu(self.fc2(x))
x = F.leaky_relu(self.fc3(x))
return self.fc4(x)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
Network | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jq/cjqaq2meov3vkcgfealq7w4w35tw2oemvmhneuxmigeoumva22p7.py
# Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# y_pred => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_5,), kwargs = {})
triton_poi_fused_sigmoid_0 = async_compile.triton('triton_poi_fused_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf1, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [y_pred], Original ATen: [aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_sigmoid_0.run(buf3, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
return (buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, buf1, buf3, primals_6, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Network(nn.Module):
def __init__(self, number_of_inputs, number_of_outputs):
super(Network, self).__init__()
self.l1 = nn.Linear(number_of_inputs, number_of_inputs)
self.l2 = nn.Linear(number_of_inputs, number_of_inputs)
self.l3 = nn.Linear(number_of_inputs, number_of_outputs)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out1 = self.l1(x)
out2 = self.l2(out1)
y_pred = self.sigmoid(self.l3(out2))
return y_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'number_of_inputs': 4, 'number_of_outputs': 4}]
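# Usage sketch (shapes taken from get_inputs/get_init_inputs above, nothing else
# assumed): nn.Linear acts on the last dimension, so the (4, 4, 4, 4) input keeps
# its shape and the final sigmoid squashes every element into (0, 1).
demo_net = Network(number_of_inputs=4, number_of_outputs=4)
demo_out = demo_net(torch.rand(4, 4, 4, 4))
assert demo_out.shape == (4, 4, 4, 4)
assert float(demo_out.min()) >= 0.0 and float(demo_out.max()) <= 1.0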
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sigmoid_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(buf1, reinterpret_tensor(primals_6, (4, 4), (1, 4
), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
get_raw_stream(0)
triton_poi_fused_sigmoid_0[grid(256)](buf3, primals_7, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_7
return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf0, buf1, buf3, primals_6, primals_4
class NetworkNew(nn.Module):
def __init__(self, number_of_inputs, number_of_outputs):
super(NetworkNew, self).__init__()
self.l1 = nn.Linear(number_of_inputs, number_of_inputs)
self.l2 = nn.Linear(number_of_inputs, number_of_inputs)
self.l3 = nn.Linear(number_of_inputs, number_of_outputs)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
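# Usage sketch for the compiled wrapper (assumes a CUDA device and a working Triton
# install, as everything above does): NetworkNew holds the same nn.Linear parameters
# as the eager Network and simply routes them through call(), so it is a drop-in
# replacement on the same (4, 4, 4, 4) input.
if torch.cuda.is_available():
    compiled_net = NetworkNew(4, 4).cuda()
    compiled_out = compiled_net(torch.rand(4, 4, 4, 4, device='cuda'))
    assert compiled_out.shape == (4, 4, 4, 4)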
| ajthinking/migration-analysis | Network | false | 3047 | [
"MIT"
] | 0 | c48312c5bf513a37e23eaaf8aa245921992f8e7f | https://github.com/ajthinking/migration-analysis/tree/c48312c5bf513a37e23eaaf8aa245921992f8e7f | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, number_of_inputs, number_of_outputs):
super().__init__()
self.l1 = nn.Linear(number_of_inputs, number_of_inputs)
self.l2 = nn.Linear(number_of_inputs, number_of_inputs)
self.l3 = nn.Linear(number_of_inputs, number_of_outputs)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out1 = self.l1(x)
out2 = self.l2(out1)
y_pred = self.sigmoid(self.l3(out2))
return y_pred
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
AttentionTransfer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/t5/ct56alqgoynhiombzpq5fm5wf3zh32egabawyhxhvug4d66ecouu.py
# Topologically Sorted Source Nodes: [s_attention, t_attention, sub], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub]
# Source node to ATen node mapping:
# s_attention => div, pow_2, sum_1
# sub => sub
# t_attention => div_1, pow_5, sum_2
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2.0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_2, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %expand), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_5, [1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %expand_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
triton_per_fused_div_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_div_linalg_vector_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_linalg_vector_norm_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr0 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp18 = tl.load(in_ptr1 + (r1 + (64*x0)), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (16 + r1 + (64*x0)), xmask, other=0.0)
tmp23 = tl.load(in_ptr1 + (32 + r1 + (64*x0)), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (48 + r1 + (64*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp19 = tmp18 * tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp28 / tmp11
tmp30 = tmp29 * tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.where(xmask, tmp31, 0)
tmp34 = tl.sum(tmp33, 1)[:, None]
tmp35 = libdevice.sqrt(tmp17)
tmp36 = 1e-12
tmp37 = triton_helpers.maximum(tmp35, tmp36)
tmp38 = tmp12 / tmp37
tmp39 = libdevice.sqrt(tmp34)
tmp40 = triton_helpers.maximum(tmp39, tmp36)
tmp41 = tmp29 / tmp40
tmp42 = tmp38 - tmp41
tl.store(out_ptr2 + (r1 + (16*x0)), tmp42, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dg/cdge47s7gmdrhyr456afj4kvgwm5zcwggjqhop2lppsiu5wcqdx6.py
# Topologically Sorted Source Nodes: [pow_3, mean_2], Original ATen: [aten.pow, aten.mean]
# Source node to ATen node mapping:
# mean_2 => mean_2
# pow_3 => pow_7
# Graph fragment:
# %pow_7 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_7,), kwargs = {})
triton_per_fused_mean_pow_1 = async_compile.triton('triton_per_fused_mean_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [s_attention, t_attention, sub], Original ATen: [aten.linalg_vector_norm, aten.div, aten.sub]
stream0 = get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_sub_0.run(arg0_1, arg1_1, buf2, 4, 16, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [pow_3, mean_2], Original ATen: [aten.pow, aten.mean]
triton_per_fused_mean_pow_1.run(buf4, buf2, 1, 64, grid=grid(1), stream=stream0)
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionTransfer(nn.Module):
def forward(self, student, teacher):
s_attention = F.normalize(student.pow(2).mean(1).view(student.size(
0), -1))
with torch.no_grad():
t_attention = F.normalize(teacher.pow(2).mean(1).view(teacher.
size(0), -1))
return (s_attention - t_attention).pow(2).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
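# Step-by-step sketch of the loss above (shapes from get_inputs, no other
# assumptions): the channel-wise mean of squared activations gives one spatial
# "attention" map per sample, which is flattened and L2-normalised before taking
# the mean squared difference between student and teacher maps.
student_demo = torch.rand(4, 4, 4, 4)
teacher_demo = torch.rand(4, 4, 4, 4)
s_map = F.normalize(student_demo.pow(2).mean(1).view(4, -1))  # (4, 16), unit L2 norm per row
t_map = F.normalize(teacher_demo.pow(2).mean(1).view(4, -1))
at_loss = (s_map - t_map).pow(2).mean()  # equals AttentionTransfer()(student_demo, teacher_demo)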
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_linalg_vector_norm_sub_0(in_ptr0, in_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp5 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp8 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp18 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0)
tmp20 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0)
tmp23 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0)
tmp26 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = 4.0
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK])
tmp16 = tl.where(xmask, tmp14, 0)
tmp17 = tl.sum(tmp16, 1)[:, None]
tmp19 = tmp18 * tmp18
tmp21 = tmp20 * tmp20
tmp22 = tmp19 + tmp21
tmp24 = tmp23 * tmp23
tmp25 = tmp22 + tmp24
tmp27 = tmp26 * tmp26
tmp28 = tmp25 + tmp27
tmp29 = tmp28 / tmp11
tmp30 = tmp29 * tmp29
tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK])
tmp33 = tl.where(xmask, tmp31, 0)
tmp34 = tl.sum(tmp33, 1)[:, None]
tmp35 = libdevice.sqrt(tmp17)
tmp36 = 1e-12
tmp37 = triton_helpers.maximum(tmp35, tmp36)
tmp38 = tmp12 / tmp37
tmp39 = libdevice.sqrt(tmp34)
tmp40 = triton_helpers.maximum(tmp39, tmp36)
tmp41 = tmp29 / tmp40
tmp42 = tmp38 - tmp41
tl.store(out_ptr2 + (r1 + 16 * x0), tmp42, xmask)
@triton.jit
def triton_per_fused_mean_pow_1(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.sum(tmp2, 1)[:, None]
tmp5 = 64.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_linalg_vector_norm_sub_0[grid(4)](arg0_1,
arg1_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf3 = empty_strided_cuda((), (), torch.float32)
buf4 = buf3
del buf3
triton_per_fused_mean_pow_1[grid(1)](buf4, buf2, 1, 64, XBLOCK=1,
num_warps=2, num_stages=1)
del buf2
return buf4,
class AttentionTransferNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ahu-hpt/AOMD | AttentionTransfer | false | 3048 | [
"Apache-2.0"
] | 0 | 8d99dbb803feaef55fc089bfb3399d2fb21d55d8 | https://github.com/ahu-hpt/AOMD/tree/8d99dbb803feaef55fc089bfb3399d2fb21d55d8 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def forward(self, student, teacher):
s_attention = F.normalize(student.pow(2).mean(1).view(student.size(
0), -1))
with torch.no_grad():
t_attention = F.normalize(teacher.pow(2).mean(1).view(teacher.
size(0), -1))
return (s_attention - t_attention).pow(2).mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|
HardDarkRank | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/cg/ccgeik5lg55dafw2bimkoqczeqgbcybsxris5v5litllvo7d4564.py
# Topologically Sorted Source Nodes: [add, mul, sub, res, res_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# mul => mul
# res => clamp_min
# res_1 => sqrt
# sub => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
triton_poi_fused_add_clamp_mul_sqrt_sub_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = 1e-12
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = libdevice.sqrt(tmp28)
tl.store(in_out_ptr0 + (x2), tmp29, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/p3/cp3o24hg2b6ufn2jb57bo43mpub6ukgixynbrienm5d6lmj6ry7z.py
# Topologically Sorted Source Nodes: [res, res_1, setitem], Original ATen: [aten.clamp, aten.sqrt, aten.lift_fresh, aten.index_put]
# Source node to ATen node mapping:
# res => clamp_min
# res_1 => sqrt
# setitem => full_default, index_put
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%sqrt, [%lift_fresh_copy_1, %lift_fresh_copy_2], %full_default), kwargs = {})
triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1 = async_compile.triton('triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1', 'mutated_arg_names': ['out_ptr0'], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp11 = 0.0
tl.store(out_ptr0 + (tl.broadcast_to(5*tmp10, [XBLOCK])), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/pv/cpv4qpjkkjnm64osykztotp63kwayeyw77adir2adz3offniwul5.py
# Topologically Sorted Source Nodes: [pow_2, score_teacher, sort], Original ATen: [aten.pow, aten.mul, aten.sort]
# Source node to ATen node mapping:
# pow_2 => pow_2
# score_teacher => mul_1
# sort => sort
# Graph fragment:
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index_put, 3), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_2, -3), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%mul_1, 1, True), kwargs = {})
triton_per_fused_mul_pow_sort_2 = async_compile.triton('triton_per_fused_mul_pow_sort_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i16', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mul_pow_sort_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mul_pow_sort_2(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = -3.0
tmp4 = tmp2 * tmp3
tmp5 = r1
tmp6 = tmp5.to(tl.int16)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp8 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9, tmp10, = triton_helpers.sort_with_index(tmp7, tmp8, None, 1, stable=False, descending=True)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/52/c52mir27ozx2cplfhvyjwkfxszj7w3iaikplsnnxn2n6hibidbjz.py
# Topologically Sorted Source Nodes: [pow_4, score_student, ordered_student, logsumexp], Original ATen: [aten.pow, aten.mul, aten.gather, aten.logsumexp]
# Source node to ATen node mapping:
# logsumexp => abs_1, amax, eq, exp, full_default_2, sub_2, sum_3, where
# ordered_student => gather
# pow_4 => pow_4
# score_student => mul_3
# Graph fragment:
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index_put_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_4, -3), kwargs = {})
# %gather : [num_users=5] = call_function[target=torch.ops.aten.gather.default](args = (%mul_3, 1, %slice_2), kwargs = {})
# %amax : [num_users=2] = call_function[target=torch.ops.aten.amax.default](args = (%gather, [1], True), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%amax,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%abs_1, inf), kwargs = {})
# %full_default_2 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_2, %amax), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%gather, %where), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_2,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1]), kwargs = {})
triton_poi_fused_gather_logsumexp_mul_pow_3 = async_compile.triton('triton_poi_fused_gather_logsumexp_mul_pow_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i16', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gather_logsumexp_mul_pow_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gather_logsumexp_mul_pow_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert(((0 <= tmp5) & (tmp5 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp5 < 4")
tmp7 = tl.load(in_ptr1 + (tmp5 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tmp7 * tmp7
tmp9 = tmp8 * tmp7
tmp10 = -3.0
tmp11 = tmp9 * tmp10
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp13 + tmp2
tmp15 = tmp13 < 0
tmp16 = tl.where(tmp15, tmp14, tmp13)
tl.device_assert(((0 <= tmp16) & (tmp16 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp16 < 4")
tmp18 = tl.load(in_ptr1 + (tmp16 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tmp18 * tmp18
tmp20 = tmp19 * tmp18
tmp21 = tmp20 * tmp10
tmp22 = triton_helpers.maximum(tmp11, tmp21)
tmp24 = tmp23.to(tl.int64)
tmp25 = tmp24 + tmp2
tmp26 = tmp24 < 0
tmp27 = tl.where(tmp26, tmp25, tmp24)
tl.device_assert(((0 <= tmp27) & (tmp27 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp27 < 4")
tmp29 = tl.load(in_ptr1 + (tmp27 + (4*x0)), xmask, eviction_policy='evict_last')
tmp30 = tmp29 * tmp29
tmp31 = tmp30 * tmp29
tmp32 = tmp31 * tmp10
tmp33 = triton_helpers.maximum(tmp22, tmp32)
tmp34 = tl_math.abs(tmp33)
tmp35 = float("inf")
tmp36 = tmp34 == tmp35
tmp37 = 0.0
tmp38 = tl.where(tmp36, tmp37, tmp33)
tmp39 = tmp11 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp21 - tmp38
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp44 = tmp32 - tmp38
tmp45 = tl_math.exp(tmp44)
tmp46 = tmp43 + tmp45
tl.store(out_ptr0 + (x0), tmp33, xmask)
tl.store(out_ptr1 + (x0), tmp46, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vs/cvsaweidwnsbxk5lizky3axmdtnikqzfh7myhvsev343f4fja5ez.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze_4, %unsqueeze_5, %unsqueeze_6], 1), kwargs = {})
triton_poi_fused_stack_4 = async_compile.triton('triton_poi_fused_stack_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i16', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = (xindex // 3)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl_math.log(tmp5)
tmp7 = tl.load(in_ptr1 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = tl_math.abs(tmp7)
tmp9 = float("inf")
tmp10 = tmp8 == tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp10, tmp11, tmp7)
tmp13 = tmp6 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + (2 + (4*x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp20.to(tl.int64)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert(((0 <= tl.broadcast_to(tmp25, [XBLOCK])) & (tl.broadcast_to(tmp25, [XBLOCK]) < 4)) | ~(tmp19 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp25, [XBLOCK]) < 4")
tmp27 = tl.load(in_ptr3 + (tmp25 + (4*x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp28 = tmp27 * tmp27
tmp29 = tmp28 * tmp27
tmp30 = -3.0
tmp31 = tmp29 * tmp30
tmp32 = tl.load(in_ptr2 + (3 + (4*x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 + tmp22
tmp35 = tmp33 < 0
tmp36 = tl.where(tmp35, tmp34, tmp33)
tl.device_assert(((0 <= tl.broadcast_to(tmp36, [XBLOCK])) & (tl.broadcast_to(tmp36, [XBLOCK]) < 4)) | ~(tmp19 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp36, [XBLOCK]) < 4")
tmp38 = tl.load(in_ptr3 + (tmp36 + (4*x1)), tmp19 & xmask, eviction_policy='evict_last', other=0.0)
tmp39 = tmp38 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp30
tmp42 = triton_helpers.maximum(tmp31, tmp41)
tmp43 = tl_math.abs(tmp42)
tmp44 = tmp43 == tmp9
tmp45 = tl.where(tmp44, tmp11, tmp42)
tmp46 = tmp31 - tmp45
tmp47 = tl_math.exp(tmp46)
tmp48 = tmp41 - tmp45
tmp49 = tl_math.exp(tmp48)
tmp50 = tmp47 + tmp49
tmp51 = tl_math.log(tmp50)
tmp52 = tmp51 + tmp45
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp19, tmp52, tmp53)
tmp55 = tmp0 >= tmp17
tmp56 = tl.full([1], 3, tl.int64)
tmp57 = tmp0 < tmp56
tmp58 = tl.load(in_ptr2 + (3 + (4*x1)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
tmp59 = tmp58.to(tl.int64)
tmp60 = tmp59 + tmp22
tmp61 = tmp59 < 0
tmp62 = tl.where(tmp61, tmp60, tmp59)
tl.device_assert(((0 <= tl.broadcast_to(tmp62, [XBLOCK])) & (tl.broadcast_to(tmp62, [XBLOCK]) < 4)) | ~(tmp55 & xmask), "index out of bounds: 0 <= tl.broadcast_to(tmp62, [XBLOCK]) < 4")
tmp64 = tl.load(in_ptr3 + (tmp62 + (4*x1)), tmp55 & xmask, eviction_policy='evict_last', other=0.0)
tmp65 = tmp64 * tmp64
tmp66 = tmp65 * tmp64
tmp67 = tmp66 * tmp30
tmp68 = tl_math.abs(tmp67)
tmp69 = tmp68 == tmp9
tmp70 = tl.where(tmp69, tmp11, tmp67)
tmp71 = tmp67 - tmp70
tmp72 = tl_math.exp(tmp71)
tmp73 = tl_math.log(tmp72)
tmp74 = tmp73 + tmp70
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp55, tmp74, tmp75)
tmp77 = tl.where(tmp19, tmp54, tmp76)
tmp78 = tl.where(tmp4, tmp15, tmp77)
tl.store(out_ptr0 + (x2), tmp78, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/6t/c6t7rj6cxxah2fyezy6pp2aw5nxel7tmlcmfeuw34oas4264vybr.py
# Topologically Sorted Source Nodes: [pow_4, score_student, ordered_student, sub_2, log_prob, mul_4, loss], Original ATen: [aten.pow, aten.mul, aten.gather, aten.sub, aten.sum, aten.mean]
# Source node to ATen node mapping:
# log_prob => sum_6
# loss => mean
# mul_4 => mul_4
# ordered_student => gather
# pow_4 => pow_4
# score_student => mul_3
# sub_2 => sub_5
# Graph fragment:
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%index_put_1, 3), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_4, -3), kwargs = {})
# %gather : [num_users=5] = call_function[target=torch.ops.aten.gather.default](args = (%mul_3, 1, %slice_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%gather, %cat), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_5, [1]), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, -1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_4,), kwargs = {})
triton_per_fused_gather_mean_mul_pow_sub_sum_5 = async_compile.triton('triton_per_fused_gather_mean_mul_pow_sub_sum_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i16', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(4,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_gather_mean_mul_pow_sub_sum_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_gather_mean_mul_pow_sub_sum_5(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + (4*r0)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + (3*r0), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + (4*r0)), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1 + (3*r0)), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (3 + (4*r0)), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr2 + (2 + (3*r0)), None, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4), "index out of bounds: 0 <= tmp5 < 4")
tmp7 = tl.load(in_ptr1 + (tmp5 + (4*r0)), None, eviction_policy='evict_last')
tmp8 = tmp7 * tmp7
tmp9 = tmp8 * tmp7
tmp10 = -3.0
tmp11 = tmp9 * tmp10
tmp13 = tmp11 - tmp12
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 + tmp2
tmp17 = tmp15 < 0
tmp18 = tl.where(tmp17, tmp16, tmp15)
tl.device_assert((0 <= tmp18) & (tmp18 < 4), "index out of bounds: 0 <= tmp18 < 4")
tmp20 = tl.load(in_ptr1 + (tmp18 + (4*r0)), None, eviction_policy='evict_last')
tmp21 = tmp20 * tmp20
tmp22 = tmp21 * tmp20
tmp23 = tmp22 * tmp10
tmp25 = tmp23 - tmp24
tmp26 = tmp13 + tmp25
tmp28 = tmp27.to(tl.int64)
tmp29 = tmp28 + tmp2
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4), "index out of bounds: 0 <= tmp31 < 4")
tmp33 = tl.load(in_ptr1 + (tmp31 + (4*r0)), None, eviction_policy='evict_last')
tmp34 = tmp33 * tmp33
tmp35 = tmp34 * tmp33
tmp36 = tmp35 * tmp10
tmp38 = tmp36 - tmp37
tmp39 = tmp26 + tmp38
tmp40 = -1.0
tmp41 = tmp39 * tmp40
tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
tmp44 = tl.sum(tmp42, 1)[:, None]
tmp45 = 4.0
tmp46 = tmp44 / tmp45
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp46, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prod], Original ATen: [aten.mm]
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = buf0; del buf0 # reuse
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [add, mul, sub, res, res_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_mul_sqrt_sub_0.run(buf2, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [res, res_1, setitem], Original ATen: [aten.clamp, aten.sqrt, aten.lift_fresh, aten.index_put]
triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1.run(buf2, 4, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int16)
# Topologically Sorted Source Nodes: [pow_2, score_teacher, sort], Original ATen: [aten.pow, aten.mul, aten.sort]
triton_per_fused_mul_pow_sort_2.run(buf2, buf5, 4, 4, grid=grid(4), stream=stream0)
buf6 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [prod_1], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [add_1, mul_2, sub_1, res_3, res_4], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt]
triton_poi_fused_add_clamp_mul_sqrt_sub_0.run(buf8, arg1_1, 16, grid=grid(16), stream=stream0)
del arg1_1
# Topologically Sorted Source Nodes: [res_3, res_4, setitem_1], Original ATen: [aten.clamp, aten.sqrt, aten.lift_fresh, aten.index_put]
triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1.run(buf8, 4, grid=grid(4), stream=stream0)
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [pow_4, score_student, ordered_student, logsumexp], Original ATen: [aten.pow, aten.mul, aten.gather, aten.logsumexp]
triton_poi_fused_gather_logsumexp_mul_pow_3.run(buf5, buf8, buf10, buf11, 4, grid=grid(4), stream=stream0)
buf12 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
triton_poi_fused_stack_4.run(buf11, buf10, buf5, buf8, buf12, 12, grid=grid(12), stream=stream0)
del buf10
del buf11
buf14 = empty_strided_cuda((), (), torch.float32)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [pow_4, score_student, ordered_student, sub_2, log_prob, mul_4, loss], Original ATen: [aten.pow, aten.mul, aten.gather, aten.sub, aten.sum, aten.mean]
triton_per_fused_gather_mean_mul_pow_sub_sum_5.run(buf15, buf5, buf8, buf12, 1, 4, grid=grid(1), stream=stream0)
del buf12
del buf5
del buf8
return (buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
def pdist(e, squared=False, eps=1e-12):
e_square = e.pow(2).sum(dim=1)
prod = e @ e.t()
res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(min
=eps)
if not squared:
res = res.sqrt()
res = res.clone()
res[range(len(e)), range(len(e))] = 0
return res
class HardDarkRank(nn.Module):
def __init__(self, alpha=3, beta=3, permute_len=4):
super().__init__()
self.alpha = alpha
self.beta = beta
self.permute_len = permute_len
def forward(self, student, teacher):
score_teacher = -1 * self.alpha * pdist(teacher, squared=False).pow(
self.beta)
score_student = -1 * self.alpha * pdist(student, squared=False).pow(
self.beta)
permute_idx = score_teacher.sort(dim=1, descending=True)[1][:, 1:
self.permute_len + 1]
ordered_student = torch.gather(score_student, 1, permute_idx)
log_prob = (ordered_student - torch.stack([torch.logsumexp(
ordered_student[:, i:], dim=1) for i in range(permute_idx.size(
1))], dim=1)).sum(dim=1)
loss = (-1 * log_prob).mean()
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
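# Sketch of what the pieces above compute (nothing assumed beyond the code itself).
# pdist uses the identity ||e_i - e_j||^2 = ||e_i||^2 + ||e_j||^2 - 2 * e_i . e_j to get
# the full pairwise distance matrix from one matrix product; the clamp guards against
# small negative values from floating-point error before sqrt, and the diagonal is
# zeroed explicitly.
emb = torch.rand(4, 4)
dist = pdist(emb)
dist_ref = (emb.unsqueeze(1) - emb.unsqueeze(0)).norm(dim=-1)
assert torch.allclose(dist, dist_ref, atol=1e-4)  # matches the direct computation up to fp error
assert torch.all(dist.diag() == 0)
# HardDarkRank.forward then scores every pair with -alpha * d**beta, keeps the teacher's
# permute_len nearest neighbours per anchor (column 0 of the descending sort is the
# anchor itself, hence the 1: offset), and maximises a Plackett-Luce style log-likelihood
# that the student ranks those neighbours in the same order; the logsumexp terms are the
# per-position partition sums over the remaining candidates.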
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_clamp_mul_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = 1e-12
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = libdevice.sqrt(tmp28)
tl.store(in_out_ptr0 + x2, tmp29, xmask)
@triton.jit
def triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tl.full([1], 2, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 0, tl.int64)
tmp6 = tl.where(tmp4, tmp5, tmp3)
tmp7 = tl.full([1], 3, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.where(tmp8, tmp1, tmp7)
tmp10 = tl.where(tmp2, tmp6, tmp9)
tmp11 = 0.0
tl.store(out_ptr0 + tl.broadcast_to(5 * tmp10, [XBLOCK]), tmp11, xmask)
@triton.jit
def triton_per_fused_mul_pow_sort_2(in_ptr0, out_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
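    # Per row: score = -3 * d**3 (alpha = beta = 3), sorted in descending order;
    # only the int16 argsort indices (the teacher's ranking) are stored.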
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tmp3 = -3.0
tmp4 = tmp2 * tmp3
tmp5 = r1
tmp6 = tmp5.to(tl.int16)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp8 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
_tmp9, tmp10 = triton_helpers.sort_with_index(tmp7, tmp8, None, 1,
stable=False, descending=True)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp10, xmask)
@triton.jit
def triton_poi_fused_gather_logsumexp_mul_pow_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
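    # Gathers the student distances at sorted positions 1..3 of each row, forms
    # the scores -3 * d**3 and writes the running max (out_ptr0) and the sum of
    # shifted exponentials (out_ptr1) used for logsumexp over the ordered row.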
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4) | ~xmask,
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr1 + (tmp5 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp8 = tmp7 * tmp7
tmp9 = tmp8 * tmp7
tmp10 = -3.0
tmp11 = tmp9 * tmp10
tmp13 = tmp12.to(tl.int64)
tmp14 = tmp13 + tmp2
tmp15 = tmp13 < 0
tmp16 = tl.where(tmp15, tmp14, tmp13)
tl.device_assert((0 <= tmp16) & (tmp16 < 4) | ~xmask,
'index out of bounds: 0 <= tmp16 < 4')
tmp18 = tl.load(in_ptr1 + (tmp16 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tmp18 * tmp18
tmp20 = tmp19 * tmp18
tmp21 = tmp20 * tmp10
tmp22 = triton_helpers.maximum(tmp11, tmp21)
tmp24 = tmp23.to(tl.int64)
tmp25 = tmp24 + tmp2
tmp26 = tmp24 < 0
tmp27 = tl.where(tmp26, tmp25, tmp24)
tl.device_assert((0 <= tmp27) & (tmp27 < 4) | ~xmask,
'index out of bounds: 0 <= tmp27 < 4')
tmp29 = tl.load(in_ptr1 + (tmp27 + 4 * x0), xmask, eviction_policy=
'evict_last')
tmp30 = tmp29 * tmp29
tmp31 = tmp30 * tmp29
tmp32 = tmp31 * tmp10
tmp33 = triton_helpers.maximum(tmp22, tmp32)
tmp34 = tl_math.abs(tmp33)
tmp35 = float('inf')
tmp36 = tmp34 == tmp35
tmp37 = 0.0
tmp38 = tl.where(tmp36, tmp37, tmp33)
tmp39 = tmp11 - tmp38
tmp40 = tl_math.exp(tmp39)
tmp41 = tmp21 - tmp38
tmp42 = tl_math.exp(tmp41)
tmp43 = tmp40 + tmp42
tmp44 = tmp32 - tmp38
tmp45 = tl_math.exp(tmp44)
tmp46 = tmp43 + tmp45
tl.store(out_ptr0 + x0, tmp33, xmask)
tl.store(out_ptr1 + x0, tmp46, xmask)
@triton.jit
def triton_poi_fused_stack_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
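    # Builds the (4, 3) stack of logsumexp(ordered_student[:, i:]) for i = 0, 1, 2:
    # column 0 reuses the max and sum-exp from the previous kernel, columns 1 and 2
    # recompute the shorter suffixes directly from the gathered scores.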
xnumel = 12
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 3
x1 = xindex // 3
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl_math.log(tmp5)
tmp7 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp8 = tl_math.abs(tmp7)
tmp9 = float('inf')
tmp10 = tmp8 == tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp10, tmp11, tmp7)
tmp13 = tmp6 + tmp12
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp4, tmp13, tmp14)
tmp16 = tmp0 >= tmp3
tmp17 = tl.full([1], 2, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tmp16 & tmp18
tmp20 = tl.load(in_ptr2 + (2 + 4 * x1), tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp21 = tmp20.to(tl.int64)
tmp22 = tl.full([XBLOCK], 4, tl.int32)
tmp23 = tmp21 + tmp22
tmp24 = tmp21 < 0
tmp25 = tl.where(tmp24, tmp23, tmp21)
tl.device_assert((0 <= tl.broadcast_to(tmp25, [XBLOCK])) & (tl.
broadcast_to(tmp25, [XBLOCK]) < 4) | ~(tmp19 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp25, [XBLOCK]) < 4')
tmp27 = tl.load(in_ptr3 + (tmp25 + 4 * x1), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp28 = tmp27 * tmp27
tmp29 = tmp28 * tmp27
tmp30 = -3.0
tmp31 = tmp29 * tmp30
tmp32 = tl.load(in_ptr2 + (3 + 4 * x1), tmp19 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp33 = tmp32.to(tl.int64)
tmp34 = tmp33 + tmp22
tmp35 = tmp33 < 0
tmp36 = tl.where(tmp35, tmp34, tmp33)
tl.device_assert((0 <= tl.broadcast_to(tmp36, [XBLOCK])) & (tl.
broadcast_to(tmp36, [XBLOCK]) < 4) | ~(tmp19 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp36, [XBLOCK]) < 4')
tmp38 = tl.load(in_ptr3 + (tmp36 + 4 * x1), tmp19 & xmask,
eviction_policy='evict_last', other=0.0)
tmp39 = tmp38 * tmp38
tmp40 = tmp39 * tmp38
tmp41 = tmp40 * tmp30
tmp42 = triton_helpers.maximum(tmp31, tmp41)
tmp43 = tl_math.abs(tmp42)
tmp44 = tmp43 == tmp9
tmp45 = tl.where(tmp44, tmp11, tmp42)
tmp46 = tmp31 - tmp45
tmp47 = tl_math.exp(tmp46)
tmp48 = tmp41 - tmp45
tmp49 = tl_math.exp(tmp48)
tmp50 = tmp47 + tmp49
tmp51 = tl_math.log(tmp50)
tmp52 = tmp51 + tmp45
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp19, tmp52, tmp53)
tmp55 = tmp0 >= tmp17
tl.full([1], 3, tl.int64)
tmp58 = tl.load(in_ptr2 + (3 + 4 * x1), tmp55 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp59 = tmp58.to(tl.int64)
tmp60 = tmp59 + tmp22
tmp61 = tmp59 < 0
tmp62 = tl.where(tmp61, tmp60, tmp59)
tl.device_assert((0 <= tl.broadcast_to(tmp62, [XBLOCK])) & (tl.
broadcast_to(tmp62, [XBLOCK]) < 4) | ~(tmp55 & xmask),
'index out of bounds: 0 <= tl.broadcast_to(tmp62, [XBLOCK]) < 4')
tmp64 = tl.load(in_ptr3 + (tmp62 + 4 * x1), tmp55 & xmask,
eviction_policy='evict_last', other=0.0)
tmp65 = tmp64 * tmp64
tmp66 = tmp65 * tmp64
tmp67 = tmp66 * tmp30
tmp68 = tl_math.abs(tmp67)
tmp69 = tmp68 == tmp9
tmp70 = tl.where(tmp69, tmp11, tmp67)
tmp71 = tmp67 - tmp70
tmp72 = tl_math.exp(tmp71)
tmp73 = tl_math.log(tmp72)
tmp74 = tmp73 + tmp70
tmp75 = tl.full(tmp74.shape, 0.0, tmp74.dtype)
tmp76 = tl.where(tmp55, tmp74, tmp75)
tmp77 = tl.where(tmp19, tmp54, tmp76)
tmp78 = tl.where(tmp4, tmp15, tmp77)
tl.store(out_ptr0 + x2, tmp78, xmask)
@triton.jit
def triton_per_fused_gather_mean_mul_pow_sub_sum_5(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
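    # Final reduction: re-gathers the ordered student scores, subtracts the
    # stacked logsumexp terms, sums over the ranking positions, negates, and
    # averages over the four rows to produce the scalar loss.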
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (1 + 4 * r0), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr2 + 3 * r0, None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (2 + 4 * r0), None, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr2 + (1 + 3 * r0), None, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (3 + 4 * r0), None, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr2 + (2 + 3 * r0), None, eviction_policy='evict_last')
tmp1 = tmp0.to(tl.int64)
tmp2 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp3 = tmp1 + tmp2
tmp4 = tmp1 < 0
tmp5 = tl.where(tmp4, tmp3, tmp1)
tl.device_assert((0 <= tmp5) & (tmp5 < 4),
'index out of bounds: 0 <= tmp5 < 4')
tmp7 = tl.load(in_ptr1 + (tmp5 + 4 * r0), None, eviction_policy=
'evict_last')
tmp8 = tmp7 * tmp7
tmp9 = tmp8 * tmp7
tmp10 = -3.0
tmp11 = tmp9 * tmp10
tmp13 = tmp11 - tmp12
tmp15 = tmp14.to(tl.int64)
tmp16 = tmp15 + tmp2
tmp17 = tmp15 < 0
tmp18 = tl.where(tmp17, tmp16, tmp15)
tl.device_assert((0 <= tmp18) & (tmp18 < 4),
'index out of bounds: 0 <= tmp18 < 4')
tmp20 = tl.load(in_ptr1 + (tmp18 + 4 * r0), None, eviction_policy=
'evict_last')
tmp21 = tmp20 * tmp20
tmp22 = tmp21 * tmp20
tmp23 = tmp22 * tmp10
tmp25 = tmp23 - tmp24
tmp26 = tmp13 + tmp25
tmp28 = tmp27.to(tl.int64)
tmp29 = tmp28 + tmp2
tmp30 = tmp28 < 0
tmp31 = tl.where(tmp30, tmp29, tmp28)
tl.device_assert((0 <= tmp31) & (tmp31 < 4),
'index out of bounds: 0 <= tmp31 < 4')
tmp33 = tl.load(in_ptr1 + (tmp31 + 4 * r0), None, eviction_policy=
'evict_last')
tmp34 = tmp33 * tmp33
tmp35 = tmp34 * tmp33
tmp36 = tmp35 * tmp10
tmp38 = tmp36 - tmp37
tmp39 = tmp26 + tmp38
tmp40 = -1.0
tmp41 = tmp39 * tmp40
tmp42 = tl.broadcast_to(tmp41, [XBLOCK, RBLOCK])
tmp44 = tl.sum(tmp42, 1)[:, None]
tmp45 = 4.0
tmp46 = tmp44 / tmp45
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp46, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
buf2 = buf1
del buf1
get_raw_stream(0)
triton_poi_fused_add_clamp_mul_sqrt_sub_0[grid(16)](buf2, arg0_1,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1[grid(4)](buf2, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.int16)
triton_per_fused_mul_pow_sort_2[grid(4)](buf2, buf5, 4, 4, XBLOCK=1,
num_warps=2, num_stages=1)
buf6 = buf2
del buf2
extern_kernels.mm(arg1_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4),
0), out=buf6)
buf7 = buf6
del buf6
buf8 = buf7
del buf7
triton_poi_fused_add_clamp_mul_sqrt_sub_0[grid(16)](buf8, arg1_1,
16, XBLOCK=16, num_warps=1, num_stages=1)
del arg1_1
triton_poi_fused_clamp_index_put_lift_fresh_sqrt_1[grid(4)](buf8, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf11 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_gather_logsumexp_mul_pow_3[grid(4)](buf5, buf8,
buf10, buf11, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 3), (3, 1), torch.float32)
triton_poi_fused_stack_4[grid(12)](buf11, buf10, buf5, buf8, buf12,
12, XBLOCK=16, num_warps=1, num_stages=1)
del buf10
del buf11
buf14 = empty_strided_cuda((), (), torch.float32)
buf15 = buf14
del buf14
triton_per_fused_gather_mean_mul_pow_sub_sum_5[grid(1)](buf15, buf5,
buf8, buf12, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf12
del buf5
del buf8
return buf15,
def pdist(e, squared=False, eps=1e-12):
e_square = e.pow(2).sum(dim=1)
prod = e @ e.t()
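    # Squared pairwise distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b,
    # clamped at eps for numerical stability before the optional sqrt.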
    res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(
        min=eps)
if not squared:
res = res.sqrt()
res = res.clone()
res[range(len(e)), range(len(e))] = 0
return res
class HardDarkRankNew(nn.Module):
def __init__(self, alpha=3, beta=3, permute_len=4):
super().__init__()
self.alpha = alpha
self.beta = beta
self.permute_len = permute_len
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| ahu-hpt/AOMD | HardDarkRank | false | 3,049 | [
"Apache-2.0"
] | 0 | 8d99dbb803feaef55fc089bfb3399d2fb21d55d8 | https://github.com/ahu-hpt/AOMD/tree/8d99dbb803feaef55fc089bfb3399d2fb21d55d8 | import torch
import torch.nn as nn
def pdist(e, squared=False, eps=1e-12):
e_square = e.pow(2).sum(dim=1)
prod = e @ e.t()
    res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) - 2 * prod).clamp(
        min=eps)
if not squared:
res = res.sqrt()
res = res.clone()
res[range(len(e)), range(len(e))] = 0
return res
class Model(nn.Module):
def __init__(self, alpha=3, beta=3, permute_len=4):
super().__init__()
self.alpha = alpha
self.beta = beta
self.permute_len = permute_len
def forward(self, student, teacher):
score_teacher = -1 * self.alpha * pdist(teacher, squared=False).pow(
self.beta)
score_student = -1 * self.alpha * pdist(student, squared=False).pow(
self.beta)
permute_idx = score_teacher.sort(dim=1, descending=True)[1][:, 1:
self.permute_len + 1]
ordered_student = torch.gather(score_student, 1, permute_idx)
log_prob = (ordered_student - torch.stack([torch.logsumexp(
ordered_student[:, i:], dim=1) for i in range(permute_idx.size(
1))], dim=1)).sum(dim=1)
loss = (-1 * log_prob).mean()
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return []
|
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/2x/c2x66b6sza3svon43c774fqn45xzpdlajk7fj3gf6dzmp6nxl7jx.py
# Topologically Sorted Source Nodes: [add, ha], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# add => add
# ha => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %unsqueeze), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_0 = async_compile.triton('triton_poi_fused_add_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = (xindex // 256)
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(out_ptr0 + (x6), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xk/cxkugsynlmnyrjhah42fewrhwovuvurnuv2qimo2qhxq27wjmq7q.py
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pi => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/jf/cjfzp64ny4hf7wdw5wptah3hqv5fcsh5rrw4brz7uxcy6ad57n7h.py
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# pi => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/7d/c7dc43ix2e7akpvghdahgzz7txjbdnsldyrpunf4qbwwkeqbzebt.py
# Topologically Sorted Source Nodes: [mul, vi_attended, u], Original ATen: [aten.mul, aten.sum, aten.add]
# Source node to ATen node mapping:
# mul => mul
# u => add_1
# vi_attended => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %primals_6), kwargs = {})
triton_poi_fused_add_mul_sum_3 = async_compile.triton('triton_poi_fused_add_mul_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 16
x2 = (xindex // 64)
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (x4), xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + (x4), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, ha], Original ATen: [aten.add, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_add_tanh_0.run(buf0, primals_2, buf1, primals_5, buf2, 1024, grid=grid(1024), stream=stream0)
del primals_2
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [ha_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [pi], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [mul, vi_attended, u], Original ATen: [aten.mul, aten.sum, aten.add]
triton_poi_fused_add_mul_sum_3.run(buf6, primals_3, primals_6, buf7, 256, grid=grid(256), stream=stream0)
return (buf7, buf6, primals_3, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf2, buf6, primals_7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class Attention(nn.Module):
def __init__(self, num_channels, embed_size, dropout=True):
"""Stacked attention Module
"""
super(Attention, self).__init__()
self.ff_image = nn.Linear(embed_size, num_channels)
self.ff_questions = nn.Linear(embed_size, num_channels)
self.dropout = nn.Dropout(p=0.5)
self.ff_attention = nn.Linear(num_channels, 1)
def forward(self, vi, vq):
"""Extract feature vector from image vector.
"""
hi = self.ff_image(vi)
hq = self.ff_questions(vq).unsqueeze(dim=1)
ha = torch.tanh(hi + hq)
if self.dropout:
ha = self.dropout(ha)
ha = self.ff_attention(ha)
pi = torch.softmax(ha, dim=1)
self.pi = pi
vi_attended = (pi * vi).sum(dim=1)
u = vi_attended + vq
return u
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_channels': 4, 'embed_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_tanh_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
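    # Fuses both Linear bias additions, the broadcast add of the question
    # projection onto the image projection, and the tanh nonlinearity.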
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex % 256
x0 = xindex % 4
x3 = xindex // 256
x5 = xindex % 64
x6 = xindex
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp7 = libdevice.tanh(tmp6)
tl.store(out_ptr0 + x6, tmp7, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_mul_sum_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 16
x2 = xindex // 64
x3 = xindex % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + (64 + x3), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (128 + x3), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (192 + x3), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + x4, xmask)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp16 = tmp14 + tmp15
tl.store(out_ptr0 + x4, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (1, 4), (4, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_add_tanh_0[grid(1024)](buf0, primals_2, buf1,
primals_5, buf2, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
del primals_5
buf4 = reinterpret_tensor(buf1, (256, 1), (1, 1), 0)
del buf1
extern_kernels.addmm(primals_8, reinterpret_tensor(buf2, (256, 4),
(4, 1), 0), reinterpret_tensor(primals_7, (4, 1), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_8
buf5 = reinterpret_tensor(buf0, (4, 4, 4, 4, 1), (64, 16, 4, 1, 256), 0
)
del buf0
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4, 1), (64, 16, 4, 1, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf5
triton_poi_fused_add_mul_sum_3[grid(256)](buf6, primals_3,
primals_6, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
return buf7, buf6, primals_3, reinterpret_tensor(primals_6, (64, 4), (4,
1), 0), buf2, buf6, primals_7
class AttentionNew(nn.Module):
def __init__(self, num_channels, embed_size, dropout=True):
"""Stacked attention Module
"""
super(AttentionNew, self).__init__()
self.ff_image = nn.Linear(embed_size, num_channels)
self.ff_questions = nn.Linear(embed_size, num_channels)
self.dropout = nn.Dropout(p=0.5)
self.ff_attention = nn.Linear(num_channels, 1)
def forward(self, input_0, input_1):
primals_1 = self.ff_image.weight
primals_2 = self.ff_image.bias
primals_4 = self.ff_questions.weight
primals_5 = self.ff_questions.bias
primals_7 = self.ff_attention.weight
primals_8 = self.ff_attention.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| ahmed563/Group14_Project_DLSpring2021 | Attention | false | 3,050 | [
"MIT"
] | 0 | d2b03555cadb483ae472b613f107173f89c07d9b | https://github.com/ahmed563/Group14_Project_DLSpring2021/tree/d2b03555cadb483ae472b613f107173f89c07d9b | import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self, num_channels, embed_size, dropout=True):
"""Stacked attention Module
"""
super().__init__()
self.ff_image = nn.Linear(embed_size, num_channels)
self.ff_questions = nn.Linear(embed_size, num_channels)
self.dropout = nn.Dropout(p=0.5)
self.ff_attention = nn.Linear(num_channels, 1)
def forward(self, vi, vq):
"""Extract feature vector from image vector.
"""
hi = self.ff_image(vi)
hq = self.ff_questions(vq).unsqueeze(dim=1)
ha = torch.tanh(hi + hq)
if self.dropout:
ha = self.dropout(ha)
ha = self.ff_attention(ha)
pi = torch.softmax(ha, dim=1)
self.pi = pi
vi_attended = (pi * vi).sum(dim=1)
u = vi_attended + vq
return u
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/wb/cwban6vrsf5bwkkkcwxjqgr3j3df4rtyzenlfm7ijyld6jqf76er.py
# Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.tanh]
# Source node to ATen node mapping:
# pi1 => tanh
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_3), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16, ), (1, ))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16, ), (1, ))
assert_size_stride(primals_8, (1, 16), (16, 1))
assert_size_stride(primals_9, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_tanh_0.run(buf1, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_6, (4, 16), (1, 4), 0), out=buf3)
del primals_6
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [v1], Original ATen: [aten.tanh]
triton_poi_fused_tanh_0.run(buf4, primals_7, 64, grid=grid(64), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [values], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8, (16, 1), (1, 16), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (buf2, buf6, primals_1, buf1, buf4, primals_8, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0.0, std=0.1)
nn.init.constant_(layer.bias, 0.0)
class Net(nn.Module):
def __init__(self, s_dim, a_dim, hidden=16):
super(Net, self).__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.pi1 = nn.Linear(s_dim, hidden)
self.pi2 = nn.Linear(hidden, a_dim)
self.v1 = nn.Linear(s_dim, hidden)
self.v2 = nn.Linear(hidden, 1)
set_init([self.pi1, self.pi2, self.v1, self.v2])
self.distribution = torch.distributions.Categorical
self.initialized = True
def forward(self, x):
x = torch.flatten(x, start_dim=1)
pi1 = torch.tanh(self.pi1(x))
logits = self.pi2(pi1)
v1 = torch.tanh(self.v1(x))
values = self.v2(v1)
return logits, values
def choose_action(self, s):
self.eval()
logits, _ = self.forward(s)
prob = F.softmax(logits, dim=1).data
m = self.distribution(prob)
return m.sample().numpy()[0]
def loss_func(self, s, a, v_t):
self.train()
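        # Advantage actor-critic loss: td is the advantage, the critic term is
        # td^2 and the actor term is -log pi(a|s) * td with the advantage detached.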
logits, values = self.forward(s)
td = v_t - values
c_loss = td.pow(2)
probs = F.softmax(logits, dim=1)
m = self.distribution(probs)
exp_v = m.log_prob(a) * td.detach().squeeze()
a_loss = -exp_v
total_loss = (c_loss + a_loss).mean()
return total_loss
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'s_dim': 4, 'a_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
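    # Adds the Linear bias to the matmul output and applies tanh in place.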
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (16, 4), (4, 1))
assert_size_stride(primals_3, (16,), (1,))
assert_size_stride(primals_4, (4, 16), (16, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (16, 4), (4, 1))
assert_size_stride(primals_7, (16,), (1,))
assert_size_stride(primals_8, (1, 16), (16, 1))
assert_size_stride(primals_9, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 16),
(1, 4), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_tanh_0[grid(64)](buf1, primals_3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_6, (4, 16),
(1, 4), 0), out=buf3)
del primals_6
buf4 = buf3
del buf3
triton_poi_fused_tanh_0[grid(64)](buf4, primals_7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_9, buf4, reinterpret_tensor(primals_8,
(16, 1), (1, 16), 0), alpha=1, beta=1, out=buf6)
del primals_9
return buf2, buf6, primals_1, buf1, buf4, primals_8, primals_4
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0.0, std=0.1)
nn.init.constant_(layer.bias, 0.0)
class NetNew(nn.Module):
def __init__(self, s_dim, a_dim, hidden=16):
super(NetNew, self).__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.pi1 = nn.Linear(s_dim, hidden)
self.pi2 = nn.Linear(hidden, a_dim)
self.v1 = nn.Linear(s_dim, hidden)
self.v2 = nn.Linear(hidden, 1)
set_init([self.pi1, self.pi2, self.v1, self.v2])
self.distribution = torch.distributions.Categorical
self.initialized = True
def choose_action(self, s):
self.eval()
logits, _ = self.forward(s)
prob = F.softmax(logits, dim=1).data
m = self.distribution(prob)
return m.sample().numpy()[0]
def loss_func(self, s, a, v_t):
self.train()
logits, values = self.forward(s)
td = v_t - values
c_loss = td.pow(2)
probs = F.softmax(logits, dim=1)
m = self.distribution(probs)
exp_v = m.log_prob(a) * td.detach().squeeze()
a_loss = -exp_v
total_loss = (c_loss + a_loss).mean()
return total_loss
def forward(self, input_0):
primals_2 = self.pi1.weight
primals_3 = self.pi1.bias
primals_4 = self.pi2.weight
primals_5 = self.pi2.bias
primals_6 = self.v1.weight
primals_7 = self.v1.bias
primals_8 = self.v2.weight
primals_9 = self.v2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0], output[1]
| aivaslab/marlgrid | Net | false | 3,051 | [
"Apache-2.0"
] | 0 | 10b53d27ce224fadeeb5830d6034350a69feb4b4 | https://github.com/aivaslab/marlgrid/tree/10b53d27ce224fadeeb5830d6034350a69feb4b4 | import torch
import torch.nn as nn
import torch.nn.functional as F
def set_init(layers):
for layer in layers:
nn.init.normal_(layer.weight, mean=0.0, std=0.1)
nn.init.constant_(layer.bias, 0.0)
class Model(nn.Module):
def __init__(self, s_dim, a_dim, hidden=16):
super().__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.pi1 = nn.Linear(s_dim, hidden)
self.pi2 = nn.Linear(hidden, a_dim)
self.v1 = nn.Linear(s_dim, hidden)
self.v2 = nn.Linear(hidden, 1)
set_init([self.pi1, self.pi2, self.v1, self.v2])
self.distribution = torch.distributions.Categorical
self.initialized = True
def forward(self, x):
x = torch.flatten(x, start_dim=1)
pi1 = torch.tanh(self.pi1(x))
logits = self.pi2(pi1)
v1 = torch.tanh(self.v1(x))
values = self.v2(v1)
return logits, values
def choose_action(self, s):
self.eval()
logits, _ = self.forward(s)
prob = F.softmax(logits, dim=1).data
m = self.distribution(prob)
return m.sample().numpy()[0]
def loss_func(self, s, a, v_t):
self.train()
logits, values = self.forward(s)
td = v_t - values
c_loss = td.pow(2)
probs = F.softmax(logits, dim=1)
m = self.distribution(probs)
exp_v = m.log_prob(a) * td.detach().squeeze()
a_loss = -exp_v
total_loss = (c_loss + a_loss).mean()
return total_loss
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Actor | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
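    # Concatenates the two (4, 4, 4, 4) inputs along dim 1 into one (4, 8, 4, 4) buffer.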
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/vn/cvnpex5pi5isev3fnvj6qndrzy76i24cv6txnyr654zq7d6c6srb.py
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.clamp, aten.div]
# Source node to ATen node mapping:
# output_1 => clamp_max, clamp_min
# output_2 => div
# Graph fragment:
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%view_1, 0), kwargs = {})
# %clamp_max : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, %expand), kwargs = {})
triton_poi_fused_clamp_div_1 = async_compile.triton('triton_poi_fused_clamp_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp5 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (256 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (384 + x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = triton_helpers.minimum(tmp6, tmp3)
tmp8 = tmp7 * tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = triton_helpers.minimum(tmp10, tmp3)
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp15 = triton_helpers.maximum(tmp14, tmp1)
tmp16 = triton_helpers.minimum(tmp15, tmp3)
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = triton_helpers.maximum(tmp19, tmp1)
tmp21 = triton_helpers.minimum(tmp20, tmp3)
tmp22 = tmp21 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = tmp4 / tmp26
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1, output_2], Original ATen: [aten.clamp, aten.div]
triton_poi_fused_clamp_div_1.run(buf1, buf2, 512, grid=grid(512), stream=stream0)
return (buf2, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
class Actor(nn.Module):
def __init__(self, input_dim, output_dim):
super(Actor, self).__init__()
self.layer1 = nn.Linear(input_dim, output_dim, bias=True)
def _format(self, state):
x = state
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
x = x.unsqueeze(0)
return x
def forward(self, a, p):
input = self._format(torch.cat((a, p), dim=1))
output = self.layer1(input)
output = torch.clamp(output, 0, 1)
output = F.normalize(output, dim=0)
return output
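# Shape note (added for illustration; not part of the original source):
#   a, p = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
#   out = Actor(4, 4)(a, p)   # cat on dim=1 -> (4, 8, 4, 4); the Linear layer acts on the last dim
# torch.clamp keeps values in [0, 1], and F.normalize(..., dim=0) then divides by the
# L2 norm taken across the batch axis -- which is what the fused clamp/div Triton
# kernel above does with its four stride-128 loads (one per batch element).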
def full_pass(self, a, p):
logits = self.forward(a, p)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
logpa = dist.log_prob(action).unsqueeze(-1)
entropy = dist.entropy().unsqueeze(-1)
is_exploratory = action != np.argmax(logits.detach().numpy())
return action.item(), is_exploratory.item(), logpa, entropy
def select_action(self, a, p):
logits = self.forward(a, p)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
return action.item()
def select_greedy_action(self, a, p):
logits = self.forward(a, p)
return np.argmax(logits.detach().numpy())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_dim': 4, 'output_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_clamp_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp5 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (128 + x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (256 + x0), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (384 + x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = 1.0
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp5, tmp1)
tmp7 = triton_helpers.minimum(tmp6, tmp3)
tmp8 = tmp7 * tmp7
tmp10 = triton_helpers.maximum(tmp9, tmp1)
tmp11 = triton_helpers.minimum(tmp10, tmp3)
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp15 = triton_helpers.maximum(tmp14, tmp1)
tmp16 = triton_helpers.minimum(tmp15, tmp3)
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp20 = triton_helpers.maximum(tmp19, tmp1)
tmp21 = triton_helpers.minimum(tmp20, tmp3)
tmp22 = tmp21 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = libdevice.sqrt(tmp23)
tmp25 = 1e-12
tmp26 = triton_helpers.maximum(tmp24, tmp25)
tmp27 = tmp4 / tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((128, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (128, 4),
(4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
triton_poi_fused_clamp_div_1[grid(512)](buf1, buf2, 512, XBLOCK=256,
num_warps=4, num_stages=1)
return buf2, reinterpret_tensor(buf0, (128, 4), (4, 1), 0), buf1
class ActorNew(nn.Module):
def __init__(self, input_dim, output_dim):
super(ActorNew, self).__init__()
self.layer1 = nn.Linear(input_dim, output_dim, bias=True)
def _format(self, state):
x = state
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
x = x.unsqueeze(0)
return x
def full_pass(self, a, p):
logits = self.forward(a, p)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
logpa = dist.log_prob(action).unsqueeze(-1)
entropy = dist.entropy().unsqueeze(-1)
is_exploratory = action != np.argmax(logits.detach().numpy())
return action.item(), is_exploratory.item(), logpa, entropy
def select_action(self, a, p):
logits = self.forward(a, p)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
return action.item()
def select_greedy_action(self, a, p):
logits = self.forward(a, p)
return np.argmax(logits.detach().numpy())
def forward(self, input_0, input_1):
primals_3 = self.layer1.weight
primals_4 = self.layer1.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| ajy8456/active-recognition | Actor | false | 3,052 | [
"MIT"
] | 0 | 7d3a4bbfceaf5fb32cd43f62636f36a10ab63807 | https://github.com/ajy8456/active-recognition/tree/7d3a4bbfceaf5fb32cd43f62636f36a10ab63807 | import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
def __init__(self, input_dim, output_dim):
super().__init__()
self.layer1 = nn.Linear(input_dim, output_dim, bias=True)
def _format(self, state):
x = state
if not isinstance(x, torch.Tensor):
x = torch.tensor(x, dtype=torch.float32)
x = x.unsqueeze(0)
return x
def forward(self, a, p):
input = self._format(torch.cat((a, p), dim=1))
output = self.layer1(input)
output = torch.clamp(output, 0, 1)
output = F.normalize(output, dim=0)
return output
def full_pass(self, a, p):
logits = self.forward(a, p)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
logpa = dist.log_prob(action).unsqueeze(-1)
entropy = dist.entropy().unsqueeze(-1)
is_exploratory = action != np.argmax(logits.detach().numpy())
return action.item(), is_exploratory.item(), logpa, entropy
def select_action(self, a, p):
logits = self.forward(a, p)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
return action.item()
def select_greedy_action(self, a, p):
logits = self.forward(a, p)
return np.argmax(logits.detach().numpy())
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
ContractingBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/jx/cjx4mzuiz4tmnrqadhwcskslq3cjulos3cv5m45t7hnxysbxc7zw.py
# Topologically Sorted Source Nodes: [conv2d, fx], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# fx => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 3844) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/qm/cqm7o42bi3o7atdx5j5wot4fthliaoprruled3wki2munl66kqum.py
# Topologically Sorted Source Nodes: [conv2d_1, fx_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# fx_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 3600) % 4
x0 = xindex % 3600
x3 = (xindex // 3600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x0 + (3712*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, fx], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 61504, grid=grid(61504), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 60, 60), (14400, 3600, 60, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 60, 60), (14848, 3712, 60, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, fx_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_1.run(buf3, primals_5, buf4, 57600, grid=grid(57600), stream=stream0)
del primals_5
return (buf3, primals_1, primals_3, primals_4, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class ContractingBlock(nn.Module):
"""
ContractingBlock Class
Performs two 3x3 convolutions, each followed by a ReLU activation (no pooling is applied in this variant).
Values:
in_channels: the number of channels to expect from a given input
out_channels: the number of channels produced by each convolution
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv3x3_0 = nn.Conv2d(in_channels, out_channels, kernel_size=3)
self.conv3x3_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3)
def forward(self, x):
"""
Function for completing a forward pass of ContractingBlock:
Given an image tensor, completes a contracting block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
"""
fx = F.relu(self.conv3x3_0(x))
fx = F.relu(self.conv3x3_1(fx))
return fx
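# Shape note (added for illustration; not part of the original source): both 3x3
# convolutions use no padding, so each one shrinks the spatial size by 2.
#   x = torch.rand(4, 4, 64, 64)
#   fx = ContractingBlock(4, 4)(x)   # -> (4, 4, 60, 60)
# This matches the compiled call() above, which asserts (62, 62) after conv3x3_0
# and (60, 60) after conv3x3_1.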
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 61504
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 3844 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 57600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 3600 % 4
x0 = xindex % 3600
x3 = xindex // 3600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x0 + 3712 * x3), tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 62, 62), (15376, 3844, 62, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(61504)](buf1, primals_2,
61504, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 60, 60), (14400, 3600, 60, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 60, 60), (14848, 3712, 60, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_1[grid(57600)](
buf3, primals_5, buf4, 57600, XBLOCK=512, num_warps=4, num_stages=1
)
del primals_5
return buf3, primals_1, primals_3, primals_4, buf1, buf4
class ContractingBlockNew(nn.Module):
"""
ContractingBlock Class
Performs two 3x3 convolutions, each followed by a ReLU activation (no pooling is applied in this variant).
Values:
in_channels: the number of channels to expect from a given input
out_channels: the number of channels produced by each convolution
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv3x3_0 = nn.Conv2d(in_channels, out_channels, kernel_size=3)
self.conv3x3_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3)
def forward(self, input_0):
primals_1 = self.conv3x3_0.weight
primals_2 = self.conv3x3_0.bias
primals_4 = self.conv3x3_1.weight
primals_5 = self.conv3x3_1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| akanametov/unet-pytorch | ContractingBlock | false | 3,054 | [
"MIT"
] | 0 | 6cf0f70674958356ea4ac36fe61b0415921f72ae | https://github.com/akanametov/unet-pytorch/tree/6cf0f70674958356ea4ac36fe61b0415921f72ae | import torch
from torch import nn
from torch.nn import functional as F
class Model(nn.Module):
"""
ContractingBlock Class
Performs two convolutions followed by a max pool operation.
Values:
input_channels: the number of channels to expect from a given input
"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv3x3_0 = nn.Conv2d(in_channels, out_channels, kernel_size=3)
self.conv3x3_1 = nn.Conv2d(out_channels, out_channels, kernel_size=3)
def forward(self, x):
"""
Function for completing a forward pass of ContractingBlock:
Given an image tensor, completes a contracting block and returns the transformed tensor.
Parameters:
x: image tensor of shape (batch size, channels, height, width)
"""
fx = F.relu(self.conv3x3_0(x))
fx = F.relu(self.conv3x3_1(fx))
return fx
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [4, 4]
|
SqueezeExcitation | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/sh/cshbgrlhlsuuebcz7jbje66sr2nkeng6kilqpqluwrr5ru2afxle.py
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/26/c26hslzuqyqrl2dlvx6b4c4z6rjyq53xujfgy2jxshfryia5ki6x.py
# Topologically Sorted Source Nodes: [x_2, add], Original ATen: [aten.sigmoid, aten.add]
# Source node to ATen node mapping:
# add => add
# x_2 => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, %primals_1), kwargs = {})
triton_poi_fused_add_sigmoid_3 = async_compile.triton('triton_poi_fused_add_sigmoid_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sigmoid_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 + tmp2
tl.store(out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 32, grid=grid(32), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 16, grid=grid(16), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2, add], Original ATen: [aten.sigmoid, aten.add]
triton_poi_fused_add_sigmoid_3.run(buf5, primals_1, buf6, 256, grid=grid(256), stream=stream0)
del primals_1
return (buf6, primals_2, primals_4, buf1, buf3, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def make_divisible(v, divisor=8, min_value=None):
"""
The channel number of each layer should be divisible by 8.
The function is taken from
github.com/rwightman/pytorch-image-models/master/timm/models/layers/helpers.py
"""
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
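# Minimal sanity check (added for illustration; not part of the original source).
# With the default divisor of 8, the result is a multiple of 8 that is never more
# than ~10% below the requested value:
assert make_divisible(2) == 8    # clamped up to min_value
assert make_divisible(10) == 16  # 8 would be >10% below 10, so bump up by one divisor
assert make_divisible(12) == 16
# This is why SqueezeExcitation below uses 8 reduced channels when out_channels=-1
# and reduction=4: make_divisible(max(-1, 8) // 4) == make_divisible(2) == 8.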
class SqueezeExcitation(nn.Module):
def __init__(self, in_channels: 'int', reduction: 'int'=4, out_channels:
'int'=-1, **kwargs: dict):
super(SqueezeExcitation, self).__init__()
assert in_channels > 0
num_reduced_channels = make_divisible(max(out_channels, 8) //
reduction, 8)
self.fc1 = nn.Conv2d(in_channels, num_reduced_channels, kernel_size=1)
self.fc2 = nn.Conv2d(num_reduced_channels, in_channels, kernel_size=1)
self.activation = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, inp):
x = F.adaptive_avg_pool2d(inp, 1)
x = self.activation(self.fc1(x))
x = self.sigmoid(self.fc2(x))
return x + inp
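# Note (added for clarity; not part of the original source): the standard
# squeeze-and-excitation block multiplies the input by the per-channel gate,
# whereas this variant *adds* the broadcast gate to the input, as the fused
# add/sigmoid Triton kernel above also shows:
#   x = torch.rand(4, 4, 4, 4)
#   out = SqueezeExcitation(4)(x)   # == sigmoid(fc2(relu(fc1(avg_pool(x))))) + x, shape (4, 4, 4, 4)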
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_sigmoid_3(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 + tmp2
tl.store(out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(16)](buf5, primals_5, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_sigmoid_3[grid(256)](buf5, primals_1, buf6,
256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
return buf6, primals_2, primals_4, buf1, buf3, buf5
def make_divisible(v, divisor=8, min_value=None):
"""
The channel number of each layer should be divisible by 8.
The function is taken from
github.com/rwightman/pytorch-image-models/master/timm/models/layers/helpers.py
"""
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class SqueezeExcitationNew(nn.Module):
def __init__(self, in_channels: 'int', reduction: 'int'=4, out_channels:
'int'=-1, **kwargs: dict):
super(SqueezeExcitationNew, self).__init__()
assert in_channels > 0
num_reduced_channels = make_divisible(max(out_channels, 8) //
reduction, 8)
self.fc1 = nn.Conv2d(in_channels, num_reduced_channels, kernel_size=1)
self.fc2 = nn.Conv2d(num_reduced_channels, in_channels, kernel_size=1)
self.activation = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| akashAD98/EfficientNetv2-with-Detectron2 | SqueezeExcitation | false | 3,055 | [
"Apache-2.0"
] | 0 | 1ba7f32cda31550ed4a040c15271612fa3f73d74 | https://github.com/akashAD98/EfficientNetv2-with-Detectron2/tree/1ba7f32cda31550ed4a040c15271612fa3f73d74 | import torch
import torch.nn as nn
import torch.nn.functional as F
def make_divisible(v, divisor=8, min_value=None):
"""
The channel number of each layer should be divisible by 8.
The function is taken from
github.com/rwightman/pytorch-image-models/master/timm/models/layers/helpers.py
"""
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class Model(nn.Module):
def __init__(self, in_channels: 'int', reduction: 'int'=4, out_channels:
'int'=-1, **kwargs: dict):
super().__init__()
assert in_channels > 0
num_reduced_channels = make_divisible(max(out_channels, 8) //
reduction, 8)
self.fc1 = nn.Conv2d(in_channels, num_reduced_channels, kernel_size=1)
self.fc2 = nn.Conv2d(num_reduced_channels, in_channels, kernel_size=1)
self.activation = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, inp):
x = F.adaptive_avg_pool2d(inp, 1)
x = self.activation(self.fc1(x))
x = self.sigmoid(self.fc2(x))
return x + inp
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
SimpleGCN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/oh/cohgrxe3lxmrv72r6z5fwk7g676tipqeoy4yj23mmym2s32bis5c.py
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.cat, aten.add]
# Source node to ATen node mapping:
# output => cat
# output_1 => add
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%mm_1, %slice_4], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%cat, %primals_4), kwargs = {})
triton_poi_fused_add_cat_0 = async_compile.triton('triton_poi_fused_add_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp11 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((2*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 4, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (2 + (4*x1) + ((-2) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [support], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [side_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 2), (4, 1), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output, output_1], Original ATen: [aten.cat, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_cat_0.run(buf1, buf0, primals_4, buf2, 16, grid=grid(16), stream=stream0)
del buf0
del buf1
del primals_4
return (buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import Parameter
import torch.nn
import torch.autograd
class SimpleGCN(nn.Module):
"""A simple graph convolution layer, similar to the one defined in
Kipf et al. https://arxiv.org/abs/1609.02907
.. note::
If you use this code, please cite the original paper in addition to Kaolin.
.. code-block::
@article{kipf2016semi,
title={Semi-Supervised Classification with Graph Convolutional Networks},
author={Kipf, Thomas N and Welling, Max},
journal={arXiv preprint arXiv:1609.02907},
year={2016}
}
"""
def __init__(self, in_features, out_features, bias=True):
super(SimpleGCN, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight1 = Parameter(torch.Tensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0))
stdv *= 0.6
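# (Comment added for clarity; not part of the original source.) The bound shrinks
# with the combined fan-in + fan-out, similar in spirit to Xavier/Glorot
# initialization, and is then scaled down by 0.6.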
self.weight1.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input, adj):
support = torch.mm(input, self.weight1)
side_len = max(support.shape[1] // 3, 2)
if adj.type() == 'torch.cuda.sparse.FloatTensor':
norm = torch.sparse.mm(adj, torch.ones((support.shape[0], 1)))
normalized_support = support[:, :side_len] / norm
side_1 = torch.sparse.mm(adj, normalized_support)
else:
side_1 = torch.mm(adj, support[:, :side_len])
side_2 = support[:, side_len:]
output = torch.cat((side_1, side_2), dim=1)
if self.bias is not None:
output = output + self.bias
return output
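# Illustrative sketch (added; not part of the original source). With
# in_features = out_features = 4 the split point is side_len = max(4 // 3, 2) = 2,
# so only the first two feature columns are mixed through `adj` (W = self.weight1):
#   x, adj = torch.rand(4, 4), torch.rand(4, 4)
#   out = SimpleGCN(4, 4)(x, adj)
#   # columns 0-1: adj @ (x @ W)[:, :2]   columns 2-3: (x @ W)[:, 2:]   (+ bias)
# This matches the fused cat/add Triton kernel generated above, which reads side_1
# with stride 2 and reads the pass-through part of `support` from column offset 2.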
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import Parameter
import torch.nn
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp11 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 4, tl.int64)
tmp9 = tl.load(in_ptr1 + (2 + 4 * x1 + (-2 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tmp12 = tmp10 + tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
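# Readability sketch (not part of the generated code): assuming in_ptr0 holds the
# (4, 2) aggregated half `side_1`, in_ptr1 the full (4, 4) `support` matrix and
# in_ptr2 the bias vector, the fused kernel above is roughly equivalent to:
def _fused_add_cat_reference(side_1, support, bias):
    # Columns 0-1 come from the adjacency aggregation, columns 2-3 are taken
    # unchanged from `support`; the bias broadcasts over rows.
    return torch.cat((side_1, support[:, 2:]), dim=1) + bias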
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 2), (2, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 2), (4, 1
), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_cat_0[grid(16)](buf1, buf0, primals_4, buf2,
16, XBLOCK=16, num_warps=1, num_stages=1)
del buf0
del buf1
del primals_4
return buf2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0
), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0)
class SimpleGCNNew(nn.Module):
"""A simple graph convolution layer, similar to the one defined in
Kipf et al. https://arxiv.org/abs/1609.02907
.. note::
If you use this code, please cite the original paper in addition to Kaolin.
.. code-block::
@article{kipf2016semi,
title={Semi-Supervised Classification with Graph Convolutional Networks},
author={Kipf, Thomas N and Welling, Max},
journal={arXiv preprint arXiv:1609.02907},
year={2016}
}
"""
def __init__(self, in_features, out_features, bias=True):
super(SimpleGCNNew, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight1 = Parameter(torch.Tensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0))
stdv *= 0.6
self.weight1.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-0.1, 0.1)
def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0, input_1):
primals_1 = self.weight1
primals_4 = self.bias
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
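# Minimal usage sketch (illustrative, mirroring get_inputs()/get_init_inputs()
# above; assumes a CUDA device, since call() pins device 0):
#     model = SimpleGCNNew(in_features=4, out_features=4).cuda()
#     out = model(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))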
| akashgokul/kaolin | SimpleGCN | false | 3,056 | [
"ECL-2.0",
"Apache-2.0"
] | 0 | 6360c4f2bcdd81f461dfb4d96267e79d89d5e112 | https://github.com/akashgokul/kaolin/tree/6360c4f2bcdd81f461dfb4d96267e79d89d5e112 | import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn import Parameter
import torch.nn
import torch.autograd
class Model(nn.Module):
"""A simple graph convolution layer, similar to the one defined in
Kipf et al. https://arxiv.org/abs/1609.02907
.. note::
If you use this code, please cite the original paper in addition to Kaolin.
.. code-block::
@article{kipf2016semi,
title={Semi-Supervised Classification with Graph Convolutional Networks},
author={Kipf, Thomas N and Welling, Max},
journal={arXiv preprint arXiv:1609.02907},
year={2016}
}
"""
def __init__(self, in_features, out_features, bias=True):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight1 = Parameter(torch.Tensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 6.0 / math.sqrt(self.weight1.size(1) + self.weight1.size(0))
stdv *= 0.6
self.weight1.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input, adj):
support = torch.mm(input, self.weight1)
side_len = max(support.shape[1] // 3, 2)
if adj.type() == 'torch.cuda.sparse.FloatTensor':
norm = torch.sparse.mm(adj, torch.ones((support.shape[0], 1)))
normalized_support = support[:, :side_len] / norm
side_1 = torch.sparse.mm(adj, normalized_support)
else:
side_1 = torch.mm(adj, support[:, :side_len])
side_2 = support[:, side_len:]
output = torch.cat((side_1, side_2), dim=1)
if self.bias is not None:
output = output + self.bias
return output
def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [4, 4]
|
Discriminator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/c7/cc7ew5e5macgqb56lqlcg7icjy35237zalpxaiyszf6jy5fcpfuv.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out => convolution
# out_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 369024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 961) % 96
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/xk/cxkvyfvspf73ksgokcqqswzdbo3mm6zcwn3oysmga7t3dttjw46i.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_2 => convolution_1
# out_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 196) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bz/cbzvzpyfx3oivvtdguuwq32yrfatzdn6j7z2x5yvazqpwq5w6suy.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# out_4 => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (14 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (15 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (16 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (28 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (29 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (30 + (2*x0) + (28*x1) + (196*x2)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + (x3), tmp16, xmask)
tl.store(out_ptr1 + (x3), tmp41, xmask)
''', device_str='cuda')
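# Note (illustrative): the kernel above unrolls the 3x3 pooling window into nine
# strided loads over each 14x14 feature map (stride 2, so a 6x6 output) and
# stores both the window maximum and an int8 index of the winning position,
# which max_pool2d_with_indices keeps for the backward pass.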
# kernel path: runs/run_shard_7/inductor_cache/v6/cv65cney566y6h5klpmakajve33zudsdblkqxhrcx26ofxbsvp5p.py
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# out_5 => convolution_2
# out_6 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_3 = async_compile.triton('triton_poi_fused_convolution_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dr/cdrfa4j4sg4tuquz2onfpx2tmwrrqsxoubj47ovzcbet5hovoncx.py
# Topologically Sorted Source Nodes: [out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_10 => relu_4
# out_9 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
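# Note (illustrative): besides the fused bias-add + ReLU, the kernel above also
# writes the boolean mask `relu_out <= 0` into a separate int1 buffer; this is
# the saved mask that the threshold_backward (ReLU gradient) pass consumes.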
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (96, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_2, (96, ), (1, ))
assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1))
assert_size_stride(primals_4, (64, 96, 5, 5), (2400, 25, 5, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (2, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_11, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 96, 31, 31), (92256, 961, 31, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 369024, grid=grid(369024), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 14, 14), (12544, 196, 14, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 50176, grid=grid(50176), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.float32)
buf5 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.int8)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, buf5, 9216, grid=grid(9216), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 2, 2), (128, 4, 2, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [out_5, out_6], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf7, primals_7, 512, grid=grid(512), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 2, 2), (128, 4, 2, 1))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [out_7, out_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_3.run(buf9, primals_9, 512, grid=grid(512), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [out_9], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 2, 2, 2), (8, 4, 2, 1))
buf11 = buf10; del buf10 # reuse
buf12 = empty_strided_cuda((4, 2, 2, 2), (8, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_9, out_10], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_4.run(buf11, primals_11, buf12, 32, grid=grid(32), stream=stream0)
del primals_11
return (reinterpret_tensor(buf11, (4, 4, 2), (8, 2, 1), 0), primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf4, buf5, buf7, buf9, buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((96, 1, 7, 7), (49, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((96, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 128, 128), (16384, 16384, 128, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 96, 5, 5), (2400, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((32, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((2, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class Discriminator(nn.Module):
def __init__(self, in_features=1):
super(Discriminator, self).__init__()
self.conv1 = nn.Conv2d(in_features, 96, kernel_size=7, stride=4)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(96, 64, kernel_size=5, stride=2)
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(32, 32, kernel_size=1, stride=1)
self.conv5 = nn.Conv2d(32, 2, kernel_size=1, stride=1)
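    # Shape sketch for the default 4x1x128x128 input from get_inputs() below
    # (an illustrative note, not part of the original code): conv1 (k=7, s=4)
    # -> 96x31x31, conv2 (k=5, s=2) -> 64x14x14, max_pool (k=3, s=2) -> 64x6x6,
    # conv3 (k=3, s=2) -> 32x2x2, conv4/conv5 (1x1) keep 2x2, so the final
    # view yields logits of shape (batch, 4, 2).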
def forward(self, x):
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.max_pool(out)
out = self.conv3(out)
out = self.relu(out)
out = self.conv4(out)
out = self.relu(out)
out = self.conv5(out)
out = self.relu(out)
return out.view(out.size(0), -1, 2)
def get_inputs():
return [torch.rand([4, 1, 128, 128])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 369024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 961 % 96
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 196 % 64
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 9216
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (14 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (15 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (16 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (28 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (29 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (30 + 2 * x0 + 28 * x1 + 196 * x2), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp8 = triton_helpers.maximum(tmp7, tmp6)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp16 = triton_helpers.maximum(tmp15, tmp14)
tmp17 = tmp1 > tmp0
tmp18 = tl.full([1], 1, tl.int8)
tmp19 = tl.full([1], 0, tl.int8)
tmp20 = tl.where(tmp17, tmp18, tmp19)
tmp21 = tmp3 > tmp2
tmp22 = tl.full([1], 2, tl.int8)
tmp23 = tl.where(tmp21, tmp22, tmp20)
tmp24 = tmp5 > tmp4
tmp25 = tl.full([1], 3, tl.int8)
tmp26 = tl.where(tmp24, tmp25, tmp23)
tmp27 = tmp7 > tmp6
tmp28 = tl.full([1], 4, tl.int8)
tmp29 = tl.where(tmp27, tmp28, tmp26)
tmp30 = tmp9 > tmp8
tmp31 = tl.full([1], 5, tl.int8)
tmp32 = tl.where(tmp30, tmp31, tmp29)
tmp33 = tmp11 > tmp10
tmp34 = tl.full([1], 6, tl.int8)
tmp35 = tl.where(tmp33, tmp34, tmp32)
tmp36 = tmp13 > tmp12
tmp37 = tl.full([1], 7, tl.int8)
tmp38 = tl.where(tmp36, tmp37, tmp35)
tmp39 = tmp15 > tmp14
tmp40 = tl.full([1], 8, tl.int8)
tmp41 = tl.where(tmp39, tmp40, tmp38)
tl.store(out_ptr0 + x3, tmp16, xmask)
tl.store(out_ptr1 + x3, tmp41, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_3(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_4(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x3, tmp4, xmask)
tl.store(out_ptr0 + x3, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (96, 1, 7, 7), (49, 49, 7, 1))
assert_size_stride(primals_2, (96,), (1,))
assert_size_stride(primals_3, (4, 1, 128, 128), (16384, 16384, 128, 1))
assert_size_stride(primals_4, (64, 96, 5, 5), (2400, 25, 5, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (32, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (2, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_11, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(4,
4), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 96, 31, 31), (92256, 961, 31, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(369024)](buf1, primals_2,
369024, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 14, 14), (12544, 196, 14, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(50176)](buf3, primals_5,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.
float32)
buf5 = empty_strided_cuda((4, 64, 6, 6), (2304, 36, 6, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(9216)](buf3, buf4,
buf5, 9216, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf4, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 2, 2), (128, 4, 2, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_3[grid(512)](buf7, primals_7, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf8 = extern_kernels.convolution(buf7, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 32, 2, 2), (128, 4, 2, 1))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_3[grid(512)](buf9, primals_9, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf10 = extern_kernels.convolution(buf9, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 2, 2, 2), (8, 4, 2, 1))
buf11 = buf10
del buf10
buf12 = empty_strided_cuda((4, 2, 2, 2), (8, 4, 2, 1), torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_4[grid(32)](buf11,
primals_11, buf12, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_11
return (reinterpret_tensor(buf11, (4, 4, 2), (8, 2, 1), 0), primals_1,
primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3,
buf4, buf5, buf7, buf9, buf12)
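# Note (illustrative): call() returns the reshaped logits first, followed by the
# tensors Inductor saves for the backward pass (the input, convolution weights,
# intermediate activations, pooling indices and the final ReLU mask buf12);
# DiscriminatorNew.forward below only surfaces output[0].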
class DiscriminatorNew(nn.Module):
def __init__(self, in_features=1):
super(DiscriminatorNew, self).__init__()
self.conv1 = nn.Conv2d(in_features, 96, kernel_size=7, stride=4)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(96, 64, kernel_size=5, stride=2)
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(32, 32, kernel_size=1, stride=1)
self.conv5 = nn.Conv2d(32, 2, kernel_size=1, stride=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| ajdillhoff/simgan-pytorch | Discriminator | false | 3,057 | [
"MIT"
] | 0 | fb61241a85136aae770944e1496f9319df327561 | https://github.com/ajdillhoff/simgan-pytorch/tree/fb61241a85136aae770944e1496f9319df327561 | import torch
from torch import nn
class Model(nn.Module):
def __init__(self, in_features=1):
super().__init__()
self.conv1 = nn.Conv2d(in_features, 96, kernel_size=7, stride=4)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(96, 64, kernel_size=5, stride=2)
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(64, 32, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(32, 32, kernel_size=1, stride=1)
self.conv5 = nn.Conv2d(32, 2, kernel_size=1, stride=1)
def forward(self, x):
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.max_pool(out)
out = self.conv3(out)
out = self.relu(out)
out = self.conv4(out)
out = self.relu(out)
out = self.conv5(out)
out = self.relu(out)
return out.view(out.size(0), -1, 2)
def get_inputs():
return [torch.rand([4, 1, 128, 128])]
def get_init_inputs():
return []
|
Encoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/ix/cixxyusyg44s2hkoufcgbrv3ix5ookwqjl4ia3xkv7bdqi4yrzus.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ld/cld3palaxnfc7osbafgwkvp6zk52xgffwvw26mvcpxisjgbhf4qn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/uv/cuvrzwb3g3f2lpjm5qjbjpmqdjhj4n3dddkkb26kxlrvj2hmmaip.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/dm/cdmwswo2a3b5co6wkdwypnmj4gzx64sdfddgavtexpjem2lue6v7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu_3
# Graph fragment:
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
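# Note (illustrative): the four kernels above share the same fused bias-add +
# ReLU + backward-mask pattern; they differ only in element count and output
# strides, one specialization per linear layer (400-, 200-, 100- and 4-wide).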
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 400), (400, 1))
assert_size_stride(primals_5, (200, ), (1, ))
assert_size_stride(primals_6, (100, 200), (200, 1))
assert_size_stride(primals_7, (100, ), (1, ))
assert_size_stride(primals_8, (4, 100), (100, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf0 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf11, 25600, grid=grid(25600), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(primals_4, (400, 200), (1, 400), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf2 # reuse
buf10 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf10, 12800, grid=grid(12800), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 100), (1, 200), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf4 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf9, 6400, grid=grid(6400), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 100), (100, 1), 0), reinterpret_tensor(primals_8, (100, 4), (1, 100), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf7, primals_9, buf8, 256, grid=grid(256), stream=stream0)
del primals_9
return (buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 400), (400, 1), 0), reinterpret_tensor(buf3, (64, 200), (200, 1), 0), reinterpret_tensor(buf5, (64, 100), (100, 1), 0), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((400, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((200, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((100, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
def __init__(self, input_shape, output_shape):
super(Encoder, self).__init__()
self.input_shape = input_shape
self.encoder_out_shape = output_shape
self.linear_one = nn.Linear(self.input_shape, 400)
self.linear_two = nn.Linear(400, 200)
self.linear_three = nn.Linear(200, 100)
self.linear_four = nn.Linear(100, self.encoder_out_shape)
def forward(self, x):
x = F.relu(self.linear_one(x))
x = F.relu(self.linear_two(x))
x = F.relu(self.linear_three(x))
x = F.relu(self.linear_four(x))
return x
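# Layer-size sketch (illustrative, based on the constructor above): with
# input_shape=4 and output_shape=4 the encoder maps 4 -> 400 -> 200 -> 100 -> 4
# with a ReLU after every linear layer; nn.Linear acts on the last dimension,
# so the (4, 4, 4, 4) test input is treated as 64 independent length-4 rows.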
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': 4, 'output_shape': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
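# Note: each of the four kernels above fuses the linear layer's bias add with
# the ReLU and, as a by-product, stores the boolean mask (relu_out <= 0) that
# autograd's threshold_backward needs. The activations are updated in place via
# in_out_ptr0; kernels 0 and 2 write their masks with a padded stride
# (x2 + 1664 * x3), matching the padded bool buffers allocated in call() below.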
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (400, 4), (4, 1))
assert_size_stride(primals_2, (400,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 400), (400, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (100, 200), (200, 1))
assert_size_stride(primals_7, (100,), (1,))
assert_size_stride(primals_8, (4, 100), (100, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 400), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf0
buf11 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(25600)](buf1,
primals_2, buf11, 25600, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 400), (400, 1), 0),
reinterpret_tensor(primals_4, (400, 200), (1, 400), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf2
buf10 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(12800)](buf3,
primals_5, buf10, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_6, (200, 100), (1, 200), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf4
buf9 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(6400)](buf5,
primals_7, buf9, 6400, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_8, (100, 4), (1, 100), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(256)](buf7,
primals_9, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
return buf7, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 400), (400, 1), 0
), reinterpret_tensor(buf3, (64, 200), (200, 1), 0
), reinterpret_tensor(buf5, (64, 100), (100, 1), 0
), buf8, primals_8, buf9, primals_6, buf10, primals_4, buf11
class EncoderNew(nn.Module):
def __init__(self, input_shape, output_shape):
super(EncoderNew, self).__init__()
self.input_shape = input_shape
self.encoder_out_shape = output_shape
self.linear_one = nn.Linear(self.input_shape, 400)
self.linear_two = nn.Linear(400, 200)
self.linear_three = nn.Linear(200, 100)
self.linear_four = nn.Linear(100, self.encoder_out_shape)
def forward(self, input_0):
primals_1 = self.linear_one.weight
primals_2 = self.linear_one.bias
primals_4 = self.linear_two.weight
primals_5 = self.linear_two.bias
primals_6 = self.linear_three.weight
primals_7 = self.linear_three.bias
primals_8 = self.linear_four.weight
primals_9 = self.linear_four.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
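# Illustrative equivalence check (a sketch, assuming a CUDA device and that the
# eager Encoder from the reference implementation is available in scope; the
# helper name is made up for the example).
def _compare_with_eager(eager_encoder):
    compiled = EncoderNew(input_shape=4, output_shape=4).cuda()
    compiled.load_state_dict(eager_encoder.state_dict())
    x = torch.rand([4, 4, 4, 4], device='cuda')
    ref = eager_encoder.cuda()(x)
    out = compiled(x)
    # call() replays the same mm/addmm + fused-ReLU graph that Inductor traced,
    # so the outputs should match to floating-point tolerance.
    return torch.allclose(ref, out, atol=1e-6)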
| akaprasanga/AutoEncoder | Encoder | false | 3,058 | [
"MIT"
] | 0 | a1562a7a720c199b717796e469b9957eb958264a | https://github.com/akaprasanga/AutoEncoder/tree/a1562a7a720c199b717796e469b9957eb958264a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_shape, output_shape):
super().__init__()
self.input_shape = input_shape
self.encoder_out_shape = output_shape
self.linear_one = nn.Linear(self.input_shape, 400)
self.linear_two = nn.Linear(400, 200)
self.linear_three = nn.Linear(200, 100)
self.linear_four = nn.Linear(100, self.encoder_out_shape)
def forward(self, x):
x = F.relu(self.linear_one(x))
x = F.relu(self.linear_two(x))
x = F.relu(self.linear_three(x))
x = F.relu(self.linear_four(x))
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Decoder | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/5y/c5yq7wkgmmcygrawripwacy566sggsmh2mzk5izw35wk7ferohhu.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/ld/cld3palaxnfc7osbafgwkvp6zk52xgffwvw26mvcpxisjgbhf4qn.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/za/czalbrwcb6xxsn55bv3tbsakzyv6xzvg3h6a7qqmdno4es56w4gz.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_2 => relu_2
# Graph fragment:
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_2 = async_compile.triton('triton_poi_fused_relu_threshold_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = (xindex // 1600)
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x4), tmp4, xmask)
tl.store(out_ptr0 + (x2 + (1664*x3)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 100), (100, 1))
assert_size_stride(primals_5, (200, ), (1, ))
assert_size_stride(primals_6, (400, 200), (200, 1))
assert_size_stride(primals_7, (400, ), (1, ))
assert_size_stride(primals_8, (4, 400), (400, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf9, 6400, grid=grid(6400), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(primals_4, (100, 200), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 200), (3200, 800, 200, 1), 0); del buf2 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf3, primals_5, buf8, 12800, grid=grid(12800), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 200), (200, 1), 0), reinterpret_tensor(primals_6, (200, 400), (1, 200), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 400), (6400, 1600, 400, 1), 0); del buf4 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_2.run(buf5, primals_7, buf7, 25600, grid=grid(25600), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 400), (400, 1), 0), reinterpret_tensor(primals_8, (400, 4), (1, 400), 0), alpha=1, beta=1, out=buf6)
del primals_9
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 100), (100, 1), 0), reinterpret_tensor(buf3, (64, 200), (200, 1), 0), reinterpret_tensor(buf5, (64, 400), (400, 1), 0), primals_8, buf7, primals_6, buf8, primals_4, buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((200, 100), (100, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((200, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((400, 200), (200, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
def __init__(self, input_shape, output_shape):
super(Decoder, self).__init__()
self.input_shape = input_shape
self.decoder_out_shape = output_shape
self.linear_one = nn.Linear(self.input_shape, 100)
self.linear_two = nn.Linear(100, 200)
self.linear_three = nn.Linear(200, 400)
self.linear_four = nn.Linear(400, self.decoder_out_shape)
def forward(self, x):
x = F.relu(self.linear_one(x))
x = F.relu(self.linear_two(x))
x = F.relu(self.linear_three(x))
x = self.linear_four(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_shape': 4, 'output_shape': 4}]
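# Illustrative round trip (a sketch; pairing this Decoder with the repository's
# Encoder is an assumption based on the project name, not code taken from it).
# The Decoder mirrors the Encoder: 4 -> 100 -> 200 -> 400 -> 4 versus
# 4 -> 400 -> 200 -> 100 -> 4, so a latent width of 4 lines the two up.
def _autoencoder_round_trip(encoder, x):
    z = encoder(x)
    decoder = Decoder(input_shape=z.shape[-1], output_shape=x.shape[-1])
    x_hat = decoder(z)
    return x_hat  # same trailing feature size as x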
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 100
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 200
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_2(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 25600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 400
x2 = xindex % 1600
x3 = xindex // 1600
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x4, tmp4, xmask)
tl.store(out_ptr0 + (x2 + 1664 * x3), tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (100, 4), (4, 1))
assert_size_stride(primals_2, (100,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (200, 100), (100, 1))
assert_size_stride(primals_5, (200,), (1,))
assert_size_stride(primals_6, (400, 200), (200, 1))
assert_size_stride(primals_7, (400,), (1,))
assert_size_stride(primals_8, (4, 400), (400, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 100), (100, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 100), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 100), (1600, 400, 100, 1), 0)
del buf0
buf9 = empty_strided_cuda((4, 4, 4, 100), (1664, 400, 100, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(6400)](buf1,
primals_2, buf9, 6400, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 200), (200, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 100), (100, 1), 0),
reinterpret_tensor(primals_4, (100, 200), (1, 100), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 200), (3200, 800, 200, 1), 0)
del buf2
buf8 = empty_strided_cuda((4, 4, 4, 200), (3200, 800, 200, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(12800)](buf3,
primals_5, buf8, 12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 200), (200, 1), 0),
reinterpret_tensor(primals_6, (200, 400), (1, 200), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 400), (6400, 1600, 400, 1), 0
)
del buf4
buf7 = empty_strided_cuda((4, 4, 4, 400), (6656, 1664, 400, 1),
torch.bool)
triton_poi_fused_relu_threshold_backward_2[grid(25600)](buf5,
primals_7, buf7, 25600, XBLOCK=128, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_9, reinterpret_tensor(buf5, (64, 400),
(400, 1), 0), reinterpret_tensor(primals_8, (400, 4), (1, 400),
0), alpha=1, beta=1, out=buf6)
del primals_9
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 100), (100, 1), 0
), reinterpret_tensor(buf3, (64, 200), (200, 1), 0
), reinterpret_tensor(buf5, (64, 400), (400, 1), 0
), primals_8, buf7, primals_6, buf8, primals_4, buf9
class DecoderNew(nn.Module):
def __init__(self, input_shape, output_shape):
super(DecoderNew, self).__init__()
self.input_shape = input_shape
self.decoder_out_shape = output_shape
self.linear_one = nn.Linear(self.input_shape, 100)
self.linear_two = nn.Linear(100, 200)
self.linear_three = nn.Linear(200, 400)
self.linear_four = nn.Linear(400, self.decoder_out_shape)
def forward(self, input_0):
primals_1 = self.linear_one.weight
primals_2 = self.linear_one.bias
primals_4 = self.linear_two.weight
primals_5 = self.linear_two.bias
primals_6 = self.linear_three.weight
primals_7 = self.linear_three.bias
primals_8 = self.linear_four.weight
primals_9 = self.linear_four.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
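# Note: DecoderNew.forward packs the four layers' weights and biases into
# primals_1..primals_9 (with the input slotted in as primals_3), in exactly the
# order call() asserts via assert_size_stride, so changing any layer size also
# requires regenerating call().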
| akaprasanga/AutoEncoder | Decoder | false | 3,059 | [
"MIT"
] | 0 | a1562a7a720c199b717796e469b9957eb958264a | https://github.com/akaprasanga/AutoEncoder/tree/a1562a7a720c199b717796e469b9957eb958264a | import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input_shape, output_shape):
super().__init__()
self.input_shape = input_shape
self.decoder_out_shape = output_shape
self.linear_one = nn.Linear(self.input_shape, 100)
self.linear_two = nn.Linear(100, 200)
self.linear_three = nn.Linear(200, 400)
self.linear_four = nn.Linear(400, self.decoder_out_shape)
def forward(self, x):
x = F.relu(self.linear_one(x))
x = F.relu(self.linear_two(x))
x = F.relu(self.linear_three(x))
x = self.linear_four(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
Model | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf6, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf5, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
return (reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input, hidden, output):
super(Model, self).__init__()
self.l1 = nn.Linear(input, hidden)
self.l2 = nn.Linear(hidden, hidden)
self.l3 = nn.Linear(hidden, 2)
def forward(self, x):
out = F.relu(self.l1(x))
out = F.relu(self.l2(out))
out = self.l3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input': 4, 'hidden': 4, 'output': 4}]
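# Note: l3 always projects to 2 features regardless of the `output` argument,
# and nn.Linear only transforms the last dimension, so the (4, 4, 4, 4) input
# from get_inputs() yields a (4, 4, 4, 2) output. A quick check (illustrative):
def _expected_output_shape():
    m = Model(input=4, hidden=4, output=4)
    return m(torch.rand([4, 4, 4, 4])).shape  # torch.Size([4, 4, 4, 2])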
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (2, 4), (4, 1))
assert_size_stride(primals_7, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf6, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 2), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
return reinterpret_tensor(buf4, (4, 4, 4, 2), (32, 8, 2, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), primals_6, buf5, primals_4, buf6
class ModelNew(nn.Module):
def __init__(self, input, hidden, output):
super(ModelNew, self).__init__()
self.l1 = nn.Linear(input, hidden)
self.l2 = nn.Linear(hidden, hidden)
self.l3 = nn.Linear(hidden, 2)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
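# Note: both hidden layers are 4 units wide, so call() launches the same fused
# bias+ReLU kernel twice; ModelNew.forward returns only output[0] (the logits
# reshaped to (4, 4, 4, 2)) and discards the tensors call() keeps for backward.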
| akapoorx00/machinelearning-stuff | Model | false | 3,060 | [
"Apache-2.0"
] | 0 | 53184019b77d3387fd15b13d3bfa75529b8ed003 | https://github.com/akapoorx00/machinelearning-stuff/tree/53184019b77d3387fd15b13d3bfa75529b8ed003 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input, hidden, output):
super(Model, self).__init__()
self.l1 = nn.Linear(input, hidden)
self.l2 = nn.Linear(hidden, hidden)
self.l3 = nn.Linear(hidden, 2)
def forward(self, x):
out = F.relu(self.l1(x))
out = F.relu(self.l2(out))
out = self.l3(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
DiscShiftLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/re/crek4nuwb2qmcio2vdn52756wis557jmlcfgmxomsorwtitkl7tg.py
# Topologically Sorted Source Nodes: [pow_1, loss, mul], Original ATen: [aten.pow, aten.mean, aten.mul]
# Source node to ATen node mapping:
# loss => mean
# mul => mul
# pow_1 => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.1), kwargs = {})
triton_per_fused_mean_mul_pow_0 = async_compile.triton('triton_per_fused_mean_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_pow_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 256.0
tmp6 = tmp4 / tmp5
tmp7 = 0.1
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [pow_1, loss, mul], Original ATen: [aten.pow, aten.mean, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_mean_mul_pow_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiscShiftLoss(nn.Module):
"""Disc shift loss.
Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
"""
def __init__(self, loss_weight=0.1):
super().__init__()
self.loss_weight = loss_weight
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Tensor with shape (n, c, h, w)
Returns:
Tensor: Loss.
"""
loss = torch.mean(x ** 2)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
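# Worked example (illustrative): for a tensor filled with 0.5,
# mean(x ** 2) = 0.25, so with the default loss_weight of 0.1 the loss is 0.025.
def _disc_shift_example():
    loss_fn = DiscShiftLoss()
    x = torch.full((4, 4, 4, 4), 0.5)
    return loss_fn(x)  # tensor(0.0250)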
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mean_mul_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [RBLOCK])
tmp4 = triton_helpers.promote_to_tensor(tl.sum(tmp2, 0))
tmp5 = 256.0
tmp6 = tmp4 / tmp5
tmp7 = 0.1
tmp8 = tmp6 * tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
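# Note: triton_per_fused_mean_mul_pow_0 runs as a single program (grid(1)) over
# all 256 elements of the (4, 4, 4, 4) input: it squares each element, reduces
# the block with tl.sum, divides by 256.0 to form the mean, and scales by the
# hard-coded loss_weight of 0.1 before writing the scalar to in_out_ptr0[0].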
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_mul_pow_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class DiscShiftLossNew(nn.Module):
"""Disc shift loss.
Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
"""
def __init__(self, loss_weight=0.1):
super().__init__()
self.loss_weight = loss_weight
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| akimotty877/mmediting | DiscShiftLoss | false | 3,061 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Disc shift loss.
Args:
        loss_weight (float, optional): Loss weight. Defaults to 0.1.
"""
def __init__(self, loss_weight=0.1):
super().__init__()
self.loss_weight = loss_weight
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Tensor with shape (n, c, h, w)
Returns:
Tensor: Loss.
"""
loss = torch.mean(x ** 2)
return loss * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
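# Note: since the loss is loss_weight * mean(x ** 2), its gradient w.r.t. x is
# 2 * loss_weight * x / x.numel(), nudging the discriminator output towards
# zero. A quick gradient check (illustrative sketch):
def _disc_shift_grad_check():
    x = torch.rand([4, 4, 4, 4], requires_grad=True)
    Model()(x).backward()
    expected = 2 * 0.1 * x.detach() / x.numel()
    return torch.allclose(x.grad, expected)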
|
Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/bg/cbg32drchyezvbfwshguvyopixmzwi2llws7xkhvpdruis76tr2t.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_4 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_9, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_9, %amax), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/oo/coo5rivaroinv27r7to5gs4jb7ce7itar6epfsastoa2ig6tj65k.py
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# out_4 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
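# For reference, a minimal eager-mode sketch of what the two _log_softmax
# kernels above compute together: a numerically stable log-softmax over dim=1,
# split into a max-subtraction pass and a log-sum-exp subtraction pass. The
# helper name below is illustrative only and is not emitted by the compiler.
def _reference_log_softmax(x):
    # Pass 1 (triton_poi_fused__log_softmax_1): subtract the max over dim=1.
    shifted = x - x.amax(dim=1, keepdim=True)
    # Pass 2 (triton_poi_fused__log_softmax_2): subtract log(sum(exp(.))) over dim=1.
    return shifted - shifted.exp().sum(dim=1, keepdim=True).log()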
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf14, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf13, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf5, primals_7, buf12, 256, grid=grid(256), stream=stream0)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf7, primals_9, buf11, 256, grid=grid(256), stream=stream0)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf8)
del primals_11
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf8, buf9, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf9
return (buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf10, primals_10, buf11, primals_8, buf12, primals_6, buf13, primals_4, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self, input, hidden, output):
super(Net, self).__init__()
self.l1 = nn.Linear(input, hidden)
self.l2 = nn.Linear(hidden, hidden)
self.l3 = nn.Linear(hidden, hidden)
self.l4 = nn.Linear(hidden, hidden)
self.l5 = nn.Linear(hidden, output)
def forward(self, x):
out = F.relu(self.l1(x))
out = F.relu(self.l2(out))
out = F.relu(self.l3(out))
out = F.relu(self.l4(out))
out = F.log_softmax(self.l5(out), dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input': 4, 'hidden': 4, 'output': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
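# A hedged eager-mode sketch of the fused kernel above: it adds the linear
# bias, applies ReLU, and also stores the `out <= 0` mask that
# aten.threshold_backward consumes in the backward pass. The helper name is
# illustrative only and does not appear in the generated code.
def _reference_relu_with_mask(x, bias):
    out = torch.relu(x + bias)
    return out, out <= 0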
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf14, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf13 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf13, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf5,
primals_7, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf7,
primals_9, buf11, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf8 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf7, (64, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf8)
del primals_11
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_1[grid(256)](buf8, buf9, 256, XBLOCK=
256, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused__log_softmax_2[grid(256)](buf9, buf10, 256, XBLOCK
=256, num_warps=4, num_stages=1)
del buf9
return (buf10, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), reinterpret_tensor(buf5, (64, 4), (4, 1),
0), reinterpret_tensor(buf7, (64, 4), (4, 1), 0), buf10, primals_10,
buf11, primals_8, buf12, primals_6, buf13, primals_4, buf14)
class NetNew(nn.Module):
def __init__(self, input, hidden, output):
super(NetNew, self).__init__()
self.l1 = nn.Linear(input, hidden)
self.l2 = nn.Linear(hidden, hidden)
self.l3 = nn.Linear(hidden, hidden)
self.l4 = nn.Linear(hidden, hidden)
self.l5 = nn.Linear(hidden, output)
def forward(self, input_0):
primals_1 = self.l1.weight
primals_2 = self.l1.bias
primals_4 = self.l2.weight
primals_5 = self.l2.bias
primals_6 = self.l3.weight
primals_7 = self.l3.bias
primals_8 = self.l4.weight
primals_9 = self.l4.bias
primals_10 = self.l5.weight
primals_11 = self.l5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| akapoorx00/machinelearning-stuff | Net | false | 3,062 | [
"Apache-2.0"
] | 0 | 53184019b77d3387fd15b13d3bfa75529b8ed003 | https://github.com/akapoorx00/machinelearning-stuff/tree/53184019b77d3387fd15b13d3bfa75529b8ed003 | import torch
from torch import nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, input, hidden, output):
super().__init__()
self.l1 = nn.Linear(input, hidden)
self.l2 = nn.Linear(hidden, hidden)
self.l3 = nn.Linear(hidden, hidden)
self.l4 = nn.Linear(hidden, hidden)
self.l5 = nn.Linear(hidden, output)
def forward(self, x):
out = F.relu(self.l1(x))
out = F.relu(self.l2(out))
out = F.relu(self.l3(out))
out = F.relu(self.l4(out))
out = F.log_softmax(self.l5(out), dim=1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4, 4]
|
CharbonnierCompLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/lc/clcduj6g2ymqc7h3ffa4va3domvxzvdiu55ogmltuljc3midllwe.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, sub_1, pow_1, add_1, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sqrt, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# loss => sqrt
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pow_1 => pow_1
# pred_merged => add
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-12
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = tmp18 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
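# A hedged eager-mode equivalent of the single fused reduction above (argument
# and helper names are illustrative; the kernel hard-codes eps=1e-12 and
# loss_weight=1.0 for this trace):
def _reference_charbonnier_comp_loss(pred_alpha, fg, bg, ori_merged,
                                     eps=1e-12, loss_weight=1.0):
    pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    return loss_weight * torch.sqrt((pred_merged - ori_merged) ** 2 + eps).mean()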
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, sub_1, pow_1, add_1, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.pow, aten.sqrt, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per sample with 'mean', and then take the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
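# A small, hedged illustration of the two 'mean' reduction paths above (values
# are arbitrary and chosen only to make the difference visible):
# >>> loss = torch.ones(2, 1, 2, 2)
# >>> weight = torch.tensor([1.0, 0.0]).view(2, 1, 1, 1).expand(2, 1, 2, 2)
# >>> mask_reduce_loss(loss, weight, reduction='mean')  # sum / weight.sum(), ~1.0
# >>> mask_reduce_loss(loss, weight, reduction='mean', sample_wise=True)  # ~0.5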
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target) ** 2 + eps)
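# Note: Charbonnier loss is a smooth approximation of the L1 loss. The term
# sqrt(x**2 + eps) is close to |x| once |x| is much larger than sqrt(eps), but
# it stays differentiable at x == 0 (with eps = 1e-12, sqrt(eps) = 1e-6).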
class CharbonnierCompLoss(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per sample with 'mean', and then take the mean over all samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False,
eps=1e-12):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the original
                merged image, before being normalized by the ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). An indicator
                mask where weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * charbonnier_loss(pred_merged, ori_merged,
weight, eps=self.eps, reduction=self.reduction, sample_wise=
self.sample_wise)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import functools
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 1e-12
tmp12 = tmp10 + tmp11
tmp13 = libdevice.sqrt(tmp12)
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = tmp18 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mul_pow_rsub_sqrt_sub_0[grid(1)](buf1,
arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per sample with 'mean', and then take the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target) ** 2 + eps)
class CharbonnierCompLossNew(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per sample with 'mean', and then take the mean over all samples.
Default: False.
eps (float): A value used to control the curvature near zero.
Default: 1e-12.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False,
eps=1e-12):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
self.eps = eps
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| akimotty877/mmediting | CharbonnierCompLoss | false | 3,063 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It will first reduce the
            loss per sample with 'mean', and then take the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
"""Charbonnier loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated Charbonnier loss.
"""
return torch.sqrt((pred - target) ** 2 + eps)
class Model(nn.Module):
"""Charbonnier composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
# ... truncated (>4000 chars) for memory efficiency |
BboxHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
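# The kernel above only repacks the NCHW input into a channels-last layout
# (output strides (2097152, 1, 32768, 512)) so the 1x1 convolution below reads
# a buffer that is contiguous along the channel dimension. A hedged eager-mode
# equivalent (the helper name is illustrative):
def _reference_channels_last_repack(x):
    return x.contiguous(memory_format=torch.channels_last)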
# kernel path: runs/run_shard_7/inductor_cache/sh/cshv7keytg5sr5xrzp3o2zhhmbts22uo6atoscwlltelsovbnd6o.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 48
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 12
y1 = (yindex // 12)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (12*x2) + (49152*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12))
buf2 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf1, primals_2, buf2, 48, 4096, grid=grid(48, 4096), stream=stream0)
del buf1
del primals_2
return (buf2, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.nn
class BboxHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
return out
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 48
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 12
y1 = yindex // 12
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 49152 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12))
buf2 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_1[grid(48, 4096)](buf1, primals_2,
buf2, 48, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf1
del primals_2
return buf2, primals_1, buf0
class BboxHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| ZongqingHou/Pytorch_Retinaface | BboxHead | false | 3,064 | [
"MIT"
] | 0 | 6284b7158a0d9d3d4a2cc267a393c21863a1b938 | https://github.com/ZongqingHou/Pytorch_Retinaface/tree/6284b7158a0d9d3d4a2cc267a393c21863a1b938 | import torch
from torch import nn
import torch.nn
class Model(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super().__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
return out
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return []
|
MSECompositionLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/bv/cbvrs5xmjnaa5kevipbtguhjfkbeqz3fqzbe2wyky7pkkyvy5djt.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.mse_loss, aten.mean]
# Source node to ATen node mapping:
# loss => pow_1, sub_1
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pred_merged => add
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_add_mean_mse_loss_mul_rsub_0 = async_compile.triton('triton_per_fused_add_mean_mse_loss_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mse_loss_mul_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_mse_loss_mul_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
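# A hedged eager-mode equivalent of the fused reduction above (argument and
# helper names are illustrative; loss_weight is hard-coded to 1.0 in this trace):
def _reference_mse_comp_loss(pred_alpha, fg, bg, ori_merged, loss_weight=1.0):
    pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    return loss_weight * ((pred_merged - ori_merged) ** 2).mean()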
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.mse_loss, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_mse_loss_mul_rsub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
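# Illustrative sketch (added; the tensors below are made up) showing how
# `sample_wise` changes the weighted 'mean' reduction: False gives one global
# weighted mean, True averages a per-sample weighted mean over the batch.
def _mask_reduce_example():
    import torch
    loss = torch.ones(2, 1, 2, 2)
    loss[1] = 4.0                      # sample 1 carries a larger error
    weight = torch.zeros(2, 1, 2, 2)
    weight[0] = 1.0                    # 4 valid pixels in sample 0
    weight[1, 0, 0, 0] = 1.0           # 1 valid pixel in sample 1
    global_mean = mask_reduce_loss(loss, weight, 'mean', sample_wise=False)  # 8/5 = 1.6
    per_sample = mask_reduce_loss(loss, weight, 'mean', sample_wise=True)    # (1+4)/2 = 2.5
    return global_mean, per_sample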
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
class MSECompositionLoss(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the original
                merged image before it is normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * mse_loss(pred_merged, ori_merged, weight,
reduction=self.reduction, sample_wise=self.sample_wise)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
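# Minimal usage sketch (added; shapes follow the forward() docstring rather than
# the 4x4x4x4 benchmark inputs above). A perfect composition yields a zero loss.
def _mse_composition_example():
    import torch
    loss_fn = MSECompositionLoss(loss_weight=1.0, reduction='mean')
    pred_alpha = torch.rand(2, 1, 8, 8)
    fg = torch.rand(2, 3, 8, 8)
    bg = torch.rand(2, 3, 8, 8)
    ori_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    weight = torch.ones(2, 1, 8, 8)
    return loss_fn(pred_alpha, fg, bg, ori_merged, weight=weight)  # tensor(0.)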
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import functools
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_mse_loss_mul_rsub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_mean_mse_loss_mul_rsub_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
class MSECompositionLossNew(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| akimotty877/mmediting | MSECompositionLoss | false | 3,065 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def mse_loss(pred, target):
"""MSE loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated MSE loss.
"""
return F.mse_loss(pred, target, reduction='none')
class Model(nn.Module):
"""MSE (L2) composition loss.
Args:
loss_weight (float): Loss weight for MSE loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward(
# ... truncated (>4000 chars) for memory efficiency |
L1CompositionLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/am/camwpeehokwg5kta3nxp4pdqaj4l6nldns4yhguxa3yy3jfhld4k.py
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.abs, aten.mean]
# Source node to ATen node mapping:
# loss => abs_1, sub_1
# loss_1 => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# pred_merged => add
# sub => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg2_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %arg3_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%abs_1,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
triton_per_fused_abs_add_mean_mul_rsub_sub_0 = async_compile.triton('triton_per_fused_abs_add_mean_mul_rsub_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_abs_add_mean_mul_rsub_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_abs_add_mean_mul_rsub_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp5 = tl.load(in_ptr2 + (r0), None)
tmp8 = tl.load(in_ptr3 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.abs(tmp9)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
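# Hedged reference sketch (added; not Inductor output): the kernel above is the
# L1 counterpart of the fused MSE composition kernel -- compose with the
# predicted alpha, take the absolute difference to `ori_merged`, mean-reduce.
def _l1_composition_reference(pred_alpha, fg, bg, ori_merged):
    pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
    return (pred_merged - ori_merged).abs().mean() * 1.0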
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sub, mul_1, pred_merged, loss, loss_1, mul_2], Original ATen: [aten.mul, aten.rsub, aten.add, aten.sub, aten.abs, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_abs_add_mean_mul_rsub_sub_0.run(buf1, arg0_1, arg1_1, arg2_1, arg3_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
class L1CompositionLoss(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, pred_alpha, fg, bg, ori_merged, weight=None, **kwargs):
"""
Args:
pred_alpha (Tensor): of shape (N, 1, H, W). Predicted alpha matte.
fg (Tensor): of shape (N, 3, H, W). Tensor of foreground object.
bg (Tensor): of shape (N, 3, H, W). Tensor of background object.
            ori_merged (Tensor): of shape (N, 3, H, W). Tensor of the original
                merged image before it is normalized by ImageNet mean and std.
            weight (Tensor, optional): of shape (N, 1, H, W). It is an
                indicator matrix: weight[trimap == 128] = 1. Default: None.
"""
pred_merged = pred_alpha * fg + (1.0 - pred_alpha) * bg
if weight is not None:
weight = weight.expand(-1, 3, -1, -1)
return self.loss_weight * l1_loss(pred_merged, ori_merged, weight,
reduction=self.reduction, sample_wise=self.sample_wise)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
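# Usage sketch (added): supervising only the unknown region of a trimap, as the
# `weight` docstring suggests. The 0/128/255 trimap convention is an assumption
# here, not something this entry defines.
def _l1_composition_with_trimap():
    import torch
    loss_fn = L1CompositionLoss(reduction='mean', sample_wise=True)
    pred_alpha = torch.rand(2, 1, 8, 8)
    fg, bg = torch.rand(2, 3, 8, 8), torch.rand(2, 3, 8, 8)
    ori_merged = torch.rand(2, 3, 8, 8)
    trimap = torch.tensor([0, 128, 255])[torch.randint(0, 3, (2, 1, 8, 8))]
    weight = (trimap == 128).float()  # weight[trimap == 128] = 1, else 0
    return loss_fn(pred_alpha, fg, bg, ori_merged, weight=weight)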
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import functools
import torch.nn as nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_abs_add_mean_mul_rsub_sub_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp5 = tl.load(in_ptr2 + r0, None)
tmp8 = tl.load(in_ptr3 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp9 = tmp7 - tmp8
tmp10 = tl_math.abs(tmp9)
tmp11 = tl.broadcast_to(tmp10, [RBLOCK])
tmp13 = triton_helpers.promote_to_tensor(tl.sum(tmp11, 0))
tmp14 = 256.0
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp3
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_abs_add_mean_mul_rsub_sub_0[grid(1)](buf1, arg0_1,
arg1_1, arg2_1, arg3_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf1,
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
class L1CompositionLossNew(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
"""
def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
super().__init__()
if reduction not in ['none', 'mean', 'sum']:
raise ValueError(
                f"Unsupported reduction mode: {reduction}. Supported ones are: ['none', 'mean', 'sum']"
)
self.loss_weight = loss_weight
self.reduction = reduction
self.sample_wise = sample_wise
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| akimotty877/mmediting | L1CompositionLoss | false | 3,066 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import functools
import torch
import torch.nn as nn
from torch.nn import functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Returns:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
if reduction_enum == 0:
return loss
if reduction_enum == 1:
return loss.mean()
return loss.sum()
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
"none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            (argument of `forward()`) is not None. It first reduces the loss
            with 'mean' per sample, and then takes the mean over all samples.
Default: False.
Returns:
Tensor: Processed loss values.
"""
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
elif reduction == 'mean':
if weight.size(1) == 1:
weight = weight.expand_as(loss)
eps = 1e-12
if sample_wise:
weight = weight.sum(dim=[1, 2, 3], keepdim=True)
loss = (loss / (weight + eps)).sum() / weight.size(0)
else:
loss = loss.sum() / (weight.sum() + eps)
return loss
def masked_loss(loss_func):
"""Create a masked version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @masked_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', sample_wise=
False, **kwargs):
loss = loss_func(pred, target, **kwargs)
loss = mask_reduce_loss(loss, weight, reduction, sample_wise)
return loss
return wrapper
@masked_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (Tensor): Prediction Tensor with shape (n, c, h, w).
target ([type]): Target Tensor with shape (n, c, h, w).
Returns:
Tensor: Calculated L1 loss.
"""
return F.l1_loss(pred, target, reduction='none')
class Model(nn.Module):
"""L1 composition loss.
Args:
loss_weight (float): Loss weight for L1 loss. Default: 1.0.
reduction (str): Specifies the reduction to apply to the output.
Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to calculate the loss sample-wise. This
argument only takes effect when `reduction` is 'mean' and `weight`
(argument of `forward()`) is not
# ... truncated (>4000 chars) for memory efficiency |
CustomBatchNormManualModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7p/c7pi22wc75ruzdj7d7noylhwbkdfv2byadzt3clft347fdidyto4.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.pow, aten.div, aten.mul]
# Source node to ATen node mapping:
# out => add, add_1, div, mean, mul, pow_1, sub, var
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0]), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 0})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + (x3), tmp30, xmask)
''', device_str='cuda')
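# Hedged reference sketch (added; not Inductor output): the fused kernel above
# normalizes the (4, 4, 4, 4) input over dim 0 (a batch of 4) with a biased
# variance, then applies gamma/beta broadcast over the last dimension -- the
# same computation as CustomBatchNormManualFunction.forward.
def _batchnorm_reference(x, gamma, beta, eps=1e-05):
    mu = x.mean(0)
    var = x.var(0, unbiased=False)
    return (x - mu) / (var + eps) ** 0.5 * gamma + beta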
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.pow, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
                whether tensors need gradients in the backward pass
            input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
"""
mu = torch.mean(input, 0)
var = torch.var(input, dim=0, unbiased=False)
x_hat = (input - mu) / (var + eps) ** 0.5
out = x_hat * gamma + beta
ctx.save_for_backward(input, x_hat, out, gamma, beta, mu, var)
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
                whether tensors need gradients in the backward pass
Returns:
out: tuple containing gradients for all input arguments
"""
x, x_hat, _y, gamma, _beta, _mu, var = ctx.saved_tensors
grad_gamma = torch.sum(grad_output * x_hat, 0) if ctx.needs_input_grad[
1] else None
grad_beta = torch.sum(grad_output, 0) if ctx.needs_input_grad[2
] else None
grad_input = gamma * (1 / torch.sqrt(var)) / x.shape[0] * (x.shape[
0] * grad_output - x_hat * grad_gamma - grad_beta
) if ctx.needs_input_grad[0] else None
return grad_input, grad_gamma, grad_beta, None
class CustomBatchNormManualModule(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormManualModule, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
def forward(self, input):
"""
Compute the batch normalization via CustomBatchNormManualFunction
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
assert input.shape[1] == self.n_neurons, 'Input has incorrect shape'
out = CustomBatchNormManualFunction().apply(input, self.gamma, self
.beta, self.eps)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_neurons': 4}]
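# Verification sketch (added): a standard way to check the hand-written backward
# of CustomBatchNormManualFunction against numerical gradients. Double precision
# and the tolerances are assumptions chosen for gradcheck, not part of the entry.
def _check_custom_batchnorm_backward():
    import torch
    x = torch.randn(8, 4, dtype=torch.double, requires_grad=True)
    gamma = torch.randn(4, dtype=torch.double, requires_grad=True)
    beta = torch.randn(4, dtype=torch.double, requires_grad=True)
    return torch.autograd.gradcheck(
        CustomBatchNormManualFunction.apply, (x, gamma, beta), eps=1e-6, atol=1e-4)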
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x3, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sub_var_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
                whether tensors need gradients in the backward pass
            input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
"""
mu = torch.mean(input, 0)
var = torch.var(input, dim=0, unbiased=False)
x_hat = (input - mu) / (var + eps) ** 0.5
out = x_hat * gamma + beta
ctx.save_for_backward(input, x_hat, out, gamma, beta, mu, var)
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
                whether tensors need gradients in the backward pass
Returns:
out: tuple containing gradients for all input arguments
"""
x, x_hat, _y, gamma, _beta, _mu, var = ctx.saved_tensors
grad_gamma = torch.sum(grad_output * x_hat, 0) if ctx.needs_input_grad[
1] else None
grad_beta = torch.sum(grad_output, 0) if ctx.needs_input_grad[2
] else None
grad_input = gamma * (1 / torch.sqrt(var)) / x.shape[0] * (x.shape[
0] * grad_output - x_hat * grad_gamma - grad_beta
) if ctx.needs_input_grad[0] else None
return grad_input, grad_gamma, grad_beta, None
class CustomBatchNormManualModuleNew(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormManualModuleNew, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| akashrajkn/sarcastic-gradients | CustomBatchNormManualModule | false | 3,067 | [
"Apache-2.0"
] | 0 | 5a995ab7822dfd49cdc88855c631dcc8f1b0532f | https://github.com/akashrajkn/sarcastic-gradients/tree/5a995ab7822dfd49cdc88855c631dcc8f1b0532f | import torch
import torch.nn as nn
class CustomBatchNormManualFunction(torch.autograd.Function):
"""
This torch.autograd.Function implements a functional custom version of the batch norm operation for MLPs.
Using torch.autograd.Function allows you to write a custom backward function.
The function will be called from the nn.Module CustomBatchNormManualModule
Inside forward the tensors are (automatically) not recorded for automatic differentiation since the backward
pass is done via the backward method.
The forward pass is not called directly but via the apply() method. This makes sure that the context objects
are dealt with correctly. Example:
        my_bn_fct = CustomBatchNormManualFunction()
        normalized = my_bn_fct.apply(input, gamma, beta, eps)
"""
@staticmethod
def forward(ctx, input, gamma, beta, eps=1e-05):
"""
Compute the batch normalization
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
                whether tensors need gradients in the backward pass
            input: input tensor of shape (n_batch, n_neurons)
            gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
            beta: mean bias tensor, applied per neuron, shape (n_neurons)
eps: small float added to the variance for stability
Returns:
out: batch-normalized tensor
"""
mu = torch.mean(input, 0)
var = torch.var(input, dim=0, unbiased=False)
x_hat = (input - mu) / (var + eps) ** 0.5
out = x_hat * gamma + beta
ctx.save_for_backward(input, x_hat, out, gamma, beta, mu, var)
return out
@staticmethod
def backward(ctx, grad_output):
"""
Compute backward pass of the batch normalization.
Args:
            ctx: context object handling storing and retrieval of tensors and constants and specifying
                whether tensors need gradients in the backward pass
Returns:
out: tuple containing gradients for all input arguments
"""
x, x_hat, _y, gamma, _beta, _mu, var = ctx.saved_tensors
grad_gamma = torch.sum(grad_output * x_hat, 0) if ctx.needs_input_grad[
1] else None
grad_beta = torch.sum(grad_output, 0) if ctx.needs_input_grad[2
] else None
grad_input = gamma * (1 / torch.sqrt(var)) / x.shape[0] * (x.shape[
0] * grad_output - x_hat * grad_gamma - grad_beta
) if ctx.needs_input_grad[0] else None
return grad_input, grad_gamma, grad_beta, None
class Model(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
In self.forward the functional version CustomBatchNormManualFunction.forward is called.
The automatic differentiation of PyTorch calls the backward method of this function in the backward pass.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormManualModule object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super().__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gamma = nn.Parameter(torch.ones(self.n_neurons))
self.beta = nn.Parameter(torch.zeros(self.n_neurons))
def forward(self, input):
"""
Compute the batch normalization via CustomBatchNormManualFunction
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
assert input.shape[1] == self.n_neurons, 'Input has incorrect shape'
out = CustomBatchNormManualFunction().apply(input, self.gamma, self
.beta, self.eps)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
EqualLinearActModule | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/4q/c4qwjg3oiykrck3lufgrneimjnjyzxvb3c3h6obmpachvwty6igj.py
# Topologically Sorted Source Nodes: [sqrt, mul_1, weight], Original ATen: [aten.sqrt, aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# sqrt => full_default_1
# weight => mul_2
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %full_default_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 1.0), kwargs = {})
triton_poi_fused_mul_sqrt_0 = async_compile.triton('triton_poi_fused_mul_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py
# Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_3 => mul_3
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sqrt, mul_1, weight], Original ATen: [aten.sqrt, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sqrt_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul]
extern_kernels.addmm(buf1, primals_1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
return (buf2, buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from copy import deepcopy
from functools import partial
from torch.nn.init import _calculate_correct_fan
def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in',
lr_mul=1.0):
"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
The general idea is to dynamically rescale the weight in training instead
of in initializing so that the variance of the responses in each layer is
guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\\mathcal{N}(0, 1)`.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul)
return module
class EqualizedLR:
"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
The general idea is to dynamically rescale the weight in training instead
of in initializing so that the variance of the responses in each layer is
guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\\mathcal{N}(0, 1)`.
Args:
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
"""
def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0
):
self.name = name
self.mode = mode
self.gain = gain
self.lr_mul = lr_mul
def compute_weight(self, module):
"""Compute weight with equalized learning rate.
Args:
module (nn.Module): A module that is wrapped with equalized lr.
Returns:
torch.Tensor: Updated weight.
"""
weight = getattr(module, self.name + '_orig')
if weight.ndim == 5:
fan = _calculate_correct_fan(weight[0], self.mode)
else:
assert weight.ndim <= 4
fan = _calculate_correct_fan(weight, self.mode)
weight = weight * torch.tensor(self.gain, device=weight.device
) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device)
) * self.lr_mul
return weight
def __call__(self, module, inputs):
"""Standard interface for forward pre hooks."""
setattr(module, self.name, self.compute_weight(module))
@staticmethod
def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0):
"""Apply function.
This function is to register an equalized learning rate hook in an
``nn.Module``.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
for _, hook in module._forward_pre_hooks.items():
if isinstance(hook, EqualizedLR):
raise RuntimeError(
f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.'
)
fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul)
weight = module._parameters[name]
delattr(module, name)
module.register_parameter(name + '_orig', weight)
setattr(module, name, weight.data)
module.register_forward_pre_hook(fn)
return fn
class EqualizedLRLinearModule(nn.Linear):
"""Equalized LR LinearModule.
In this module, we adopt equalized lr in ``nn.Linear``. The equalized
learning rate is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Note that, the initialization of ``self.weight`` will be overwritten as
:math:`\\mathcal{N}(0, 1)`.
Args:
equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
If ``None``, equalized learning rate is ignored. Defaults to
dict(mode='fan_in').
"""
def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs):
super(EqualizedLRLinearModule, self).__init__(*args, **kwargs)
self.with_equlized_lr = equalized_lr_cfg is not None
if self.with_equlized_lr:
self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0)
else:
self.lr_mul = 1.0
if self.with_equlized_lr:
equalized_lr(self, **equalized_lr_cfg)
self._init_linear_weights()
def _init_linear_weights(self):
"""Initialize linear weights as described in PGGAN."""
nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class EqualLinearActModule(nn.Module):
"""Equalized LR Linear Module with Activation Layer.
    Args:
        equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
            Defaults to ``dict(gain=1.0, lr_mul=1.0)``.
        bias (bool, optional): Whether to add a learnable bias after the
            linear layer. Defaults to True.
        bias_init (float, optional): Initial value of the bias. Defaults to 0.0.
        act_cfg (dict | None, optional): Config of the activation layer.
            Defaults to None.
    """
def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0),
bias=True, bias_init=0.0, act_cfg=None, **kwargs):
super(EqualLinearActModule, self).__init__()
self.with_activation = act_cfg is not None
self.linear = EqualizedLRLinearModule(*args, bias=False,
equalized_lr_cfg=equalized_lr_cfg, **kwargs)
if equalized_lr_cfg is not None:
self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0)
else:
self.lr_mul = 1.0
if bias:
            self.bias = nn.Parameter(torch.zeros(self.linear.out_features).fill_(bias_init))
else:
self.bias = None
if self.with_activation:
act_cfg = deepcopy(act_cfg)
if act_cfg['type'] == 'fused_bias':
self.act_type = act_cfg.pop('type')
assert self.bias is not None
self.activate = partial(fused_bias_leakyrelu, **act_cfg)
else:
self.act_type = 'normal'
self.activate = build_activation_layer(act_cfg)
else:
self.act_type = None
def forward(self, x):
if x.ndim >= 3:
x = x.reshape(x.size(0), -1)
x = self.linear(x)
if self.with_activation and self.act_type == 'fused_bias':
x = self.activate(x, self.bias * self.lr_mul)
elif self.bias is not None and self.with_activation:
x = self.activate(x + self.bias * self.lr_mul)
elif self.bias is not None:
x = x + self.bias * self.lr_mul
elif self.with_activation:
x = self.activate(x)
return x
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
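# Added worked example (illustrative): with the defaults used in this module
# (gain=1.0, lr_mul=1.0) and in_features=4, EqualizedLR.compute_weight rescales
# the raw weight by
#     scale = gain * sqrt(1 / fan_in) * lr_mul = 1.0 * sqrt(1 / 4) * 1.0 = 0.5
# which is exactly the constant 0.5 folded into the compiled Triton kernel below.
def _equalized_lr_scale(fan_in, gain=1.0, lr_mul=1.0):
    return gain * (1.0 / fan_in) ** 0.5 * lr_mul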
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from copy import deepcopy
from functools import partial
from torch.nn.init import _calculate_correct_fan
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sqrt_0[grid(16)](primals_2, buf0, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(buf1, primals_1, reinterpret_tensor(buf0, (4,
4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
return buf2, buf0, primals_1
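# Added commentary: for this configuration the compiled graph above mirrors the
# eager EqualLinearActModule path. triton_poi_fused_mul_sqrt_0 applies the
# equalized-lr scale of 0.5 to the raw weight (primals_2),
# triton_poi_fused_mul_1 multiplies the bias by lr_mul (1.0 here), and
# extern_kernels.addmm then computes bias + x @ weight_eff.T. The scaled weight
# and the input are returned as well for use in the backward pass.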
def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in',
lr_mul=1.0):
"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
The general idea is to dynamically rescale the weight in training instead
of in initializing so that the variance of the responses in each layer is
guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\\mathcal{N}(0, 1)`.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul)
return module
class EqualizedLR:
"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
The general idea is to dynamically rescale the weight in training instead
of in initializing so that the variance of the responses in each layer is
guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\\mathcal{N}(0, 1)`.
Args:
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
"""
def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0
):
self.name = name
self.mode = mode
self.gain = gain
self.lr_mul = lr_mul
def compute_weight(self, module):
"""Compute weight with equalized learning rate.
Args:
module (nn.Module): A module that is wrapped with equalized lr.
Returns:
torch.Tensor: Updated weight.
"""
weight = getattr(module, self.name + '_orig')
if weight.ndim == 5:
fan = _calculate_correct_fan(weight[0], self.mode)
else:
assert weight.ndim <= 4
fan = _calculate_correct_fan(weight, self.mode)
weight = weight * torch.tensor(self.gain, device=weight.device
) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device)
) * self.lr_mul
return weight
def __call__(self, module, inputs):
"""Standard interface for forward pre hooks."""
setattr(module, self.name, self.compute_weight(module))
@staticmethod
def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0):
"""Apply function.
This function is to register an equalized learning rate hook in an
``nn.Module``.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
for _, hook in module._forward_pre_hooks.items():
if isinstance(hook, EqualizedLR):
raise RuntimeError(
f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.'
)
fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul)
weight = module._parameters[name]
delattr(module, name)
module.register_parameter(name + '_orig', weight)
setattr(module, name, weight.data)
module.register_forward_pre_hook(fn)
return fn
class EqualizedLRLinearModule(nn.Linear):
"""Equalized LR LinearModule.
In this module, we adopt equalized lr in ``nn.Linear``. The equalized
learning rate is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
Note that, the initialization of ``self.weight`` will be overwritten as
:math:`\\mathcal{N}(0, 1)`.
Args:
equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
If ``None``, equalized learning rate is ignored. Defaults to
dict(mode='fan_in').
"""
def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs):
super(EqualizedLRLinearModule, self).__init__(*args, **kwargs)
self.with_equlized_lr = equalized_lr_cfg is not None
if self.with_equlized_lr:
self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0)
else:
self.lr_mul = 1.0
if self.with_equlized_lr:
equalized_lr(self, **equalized_lr_cfg)
self._init_linear_weights()
def _init_linear_weights(self):
"""Initialize linear weights as described in PGGAN."""
nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
class EqualLinearActModuleNew(nn.Module):
"""Equalized LR Linear Module with Activation Layer.
    Args:
        equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``.
            Defaults to ``dict(gain=1.0, lr_mul=1.0)``.
        bias (bool, optional): Whether to add a learnable bias after the
            linear layer. Defaults to True.
        bias_init (float, optional): Initial value of the bias. Defaults to 0.0.
        act_cfg (dict | None, optional): Config of the activation layer.
            Defaults to None.
    """
def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0),
bias=True, bias_init=0.0, act_cfg=None, **kwargs):
super(EqualLinearActModuleNew, self).__init__()
self.with_activation = act_cfg is not None
self.linear = EqualizedLRLinearModule(*args, bias=False,
equalized_lr_cfg=equalized_lr_cfg, **kwargs)
if equalized_lr_cfg is not None:
self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0)
else:
self.lr_mul = 1.0
if bias:
            self.bias = nn.Parameter(torch.zeros(self.linear.out_features).fill_(bias_init))
else:
self.bias = None
if self.with_activation:
act_cfg = deepcopy(act_cfg)
if act_cfg['type'] == 'fused_bias':
self.act_type = act_cfg.pop('type')
assert self.bias is not None
self.activate = partial(fused_bias_leakyrelu, **act_cfg)
else:
self.act_type = 'normal'
self.activate = build_activation_layer(act_cfg)
else:
self.act_type = None
def forward(self, input_0):
primals_3 = self.bias
        primals_2 = self.linear.weight_orig
        primals_1 = input_0
        # primals_1 is the input and primals_2 the raw weight, matching call()
        # above, where primals_2 is rescaled into the effective weight and
        # primals_1 is the addmm operand.
output = call([primals_1, primals_2, primals_3])
return output[0]
| akimotty877/mmediting | EqualLinearActModule | false | 3,068 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import torch
import torch.nn as nn
from copy import deepcopy
from functools import partial
from torch.nn.init import _calculate_correct_fan
def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in',
lr_mul=1.0):
"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
The general idea is to dynamically rescale the weight in training instead
of in initializing so that the variance of the responses in each layer is
guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\\mathcal{N}(0, 1)`.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul)
return module
class EqualizedLR:
"""Equalized Learning Rate.
This trick is proposed in:
Progressive Growing of GANs for Improved Quality, Stability, and Variation
The general idea is to dynamically rescale the weight in training instead
of in initializing so that the variance of the responses in each layer is
guaranteed with some statistical properties.
Note that this function is always combined with a convolution module which
is initialized with :math:`\\mathcal{N}(0, 1)`.
Args:
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
"""
def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0
):
self.name = name
self.mode = mode
self.gain = gain
self.lr_mul = lr_mul
def compute_weight(self, module):
"""Compute weight with equalized learning rate.
Args:
module (nn.Module): A module that is wrapped with equalized lr.
Returns:
torch.Tensor: Updated weight.
"""
weight = getattr(module, self.name + '_orig')
if weight.ndim == 5:
fan = _calculate_correct_fan(weight[0], self.mode)
else:
assert weight.ndim <= 4
fan = _calculate_correct_fan(weight, self.mode)
weight = weight * torch.tensor(self.gain, device=weight.device
) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device)
) * self.lr_mul
return weight
def __call__(self, module, inputs):
"""Standard interface for forward pre hooks."""
setattr(module, self.name, self.compute_weight(module))
@staticmethod
def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0):
"""Apply function.
This function is to register an equalized learning rate hook in an
``nn.Module``.
Args:
module (nn.Module): Module to be wrapped.
name (str | optional): The name of weights. Defaults to 'weight'.
mode (str, optional): The mode of computing ``fan`` which is the
same as ``kaiming_init`` in pytorch. You can choose one from
['fan_in', 'fan_out']. Defaults to 'fan_in'.
Returns:
nn.Module: Module that is registered with equalized lr hook.
"""
for _, hook in module._forward_pre_hooks.items():
if isinstance(hook, EqualizedLR):
raise RuntimeError(
f'Cannot register two equalized_lr hooks on the same parameter {nam
# ... truncated (>4000 chars) for memory efficiency |
CustomBatchNormAutograd | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7p/c7pi22wc75ruzdj7d7noylhwbkdfv2byadzt3clft347fdidyto4.py
# Topologically Sorted Source Nodes: [mu, var, sub, add, pow_1, x, mul, out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.pow, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mu => mean
# mul => mul
# out => add_1
# pow_1 => pow_1
# sub => sub
# var => var
# x => div
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [0]), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [0]), kwargs = {correction: 0})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sub_var_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + (x3), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu, var, sub, add, pow_1, x, mul, out], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.pow, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CustomBatchNormAutograd(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormAutograd, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gamma = nn.Parameter(torch.ones(n_neurons))
self.beta = nn.Parameter(torch.zeros(n_neurons))
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
assert input.shape[1] == self.n_neurons, 'Input has incorrect shape'
mu = torch.mean(input, 0)
var = torch.var(input, dim=0, unbiased=False)
x = (input - mu) / (var + self.eps) ** 0.5
out = x * self.gamma + self.beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_neurons': 4}]
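# Added sanity check (illustrative, not part of the original file): for the 2-D
# (n_batch, n_neurons) input described in the docstring, the module matches
# nn.BatchNorm1d in training mode, which also normalizes with the biased batch
# variance.
def _sanity_check_custom_batchnorm():
    x = torch.randn(8, 4)
    ours = CustomBatchNormAutograd(4)(x)
    ref = nn.BatchNorm1d(4, eps=1e-05).train()(x)
    torch.testing.assert_close(ours, ref, rtol=1e-05, atol=1e-05)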
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sub_var_0(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex % 64
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp21 / tmp8
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.sqrt(tmp24)
tmp26 = tmp10 / tmp25
tmp28 = tmp26 * tmp27
tmp30 = tmp28 + tmp29
tl.store(out_ptr0 + x3, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sub_var_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
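# Added commentary: the single fused kernel above computes, per output element,
# the mean and biased variance over the batch dimension (the four slices at
# offsets 0, 64, 128 and 192), normalizes with eps=1e-05, and applies the
# per-feature gamma/beta scale and shift indexed by the last dimension,
# matching the eager forward of CustomBatchNormAutograd.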
class CustomBatchNormAutogradNew(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super(CustomBatchNormAutogradNew, self).__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gamma = nn.Parameter(torch.ones(n_neurons))
self.beta = nn.Parameter(torch.zeros(n_neurons))
def forward(self, input_0):
primals_2 = self.gamma
primals_3 = self.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| akashrajkn/sarcastic-gradients | CustomBatchNormAutograd | false | 3,069 | [
"Apache-2.0"
] | 0 | 5a995ab7822dfd49cdc88855c631dcc8f1b0532f | https://github.com/akashrajkn/sarcastic-gradients/tree/5a995ab7822dfd49cdc88855c631dcc8f1b0532f | import torch
import torch.nn as nn
class Model(nn.Module):
"""
This nn.module implements a custom version of the batch norm operation for MLPs.
The operations called in self.forward track the history if the input tensors have the
flag requires_grad set to True. The backward pass does not need to be implemented, it
is dealt with by the automatic differentiation provided by PyTorch.
"""
def __init__(self, n_neurons, eps=1e-05):
"""
Initializes CustomBatchNormAutograd object.
Args:
n_neurons: int specifying the number of neurons
eps: small float to be added to the variance for stability
"""
super().__init__()
self.n_neurons = n_neurons
self.eps = eps
self.gamma = nn.Parameter(torch.ones(n_neurons))
self.beta = nn.Parameter(torch.zeros(n_neurons))
def forward(self, input):
"""
Compute the batch normalization
Args:
input: input tensor of shape (n_batch, n_neurons)
Returns:
out: batch-normalized tensor
"""
assert input.shape[1] == self.n_neurons, 'Input has incorrect shape'
mu = torch.mean(input, 0)
var = torch.var(input, dim=0, unbiased=False)
x = (input - mu) / (var + self.eps) ** 0.5
out = x * self.gamma + self.beta
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4]
|
MaxFeature | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/az/cazxolgp2ne6vc522yhqcdzkhjb6btel7txdrpwzpkcc5t6sm46x.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.maximum, aten.eq, aten.gt, aten.lt]
# Source node to ATen node mapping:
# max_1 => maximum
# Graph fragment:
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%getitem, %getitem_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%getitem, %getitem_1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Tensor](args = (%getitem, %getitem_1), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Tensor](args = (%getitem, %getitem_1), kwargs = {})
triton_poi_fused_eq_gt_lt_maximum_0 = async_compile.triton('triton_poi_fused_eq_gt_lt_maximum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_gt_lt_maximum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_gt_lt_maximum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 64)
x3 = xindex % 64
x1 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + (128*x2)), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3 + (128*x2)), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp7 = tmp2 == tmp5
tmp8 = tmp2 > tmp5
tmp9 = tmp2 < tmp5
tl.store(out_ptr0 + (x4), tmp6, xmask)
tl.store(out_ptr1 + (x4), tmp7, xmask)
tl.store(out_ptr2 + (x4), tmp8, xmask)
tl.store(out_ptr3 + (x4), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.maximum, aten.eq, aten.gt, aten.lt]
stream0 = get_raw_stream(0)
triton_poi_fused_eq_gt_lt_maximum_0.run(buf0, primals_2, buf1, buf2, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf0
del primals_2
return (buf1, primals_1, primals_3, buf2, buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MaxFeature(nn.Module):
"""Conv2d or Linear layer with max feature selector
Generate feature maps with double channels, split them and select the max
feature.
Args:
in_channels (int): Channel number of inputs.
out_channels (int): Channel number of outputs.
kernel_size (int or tuple): Size of the convolving kernel.
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 1
filter_type (str): Type of filter. Options are 'conv2d' and 'linear'.
Default: 'conv2d'.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, filter_type='conv2d'):
super().__init__()
self.out_channels = out_channels
filter_type = filter_type.lower()
if filter_type == 'conv2d':
self.filter = nn.Conv2d(in_channels, 2 * out_channels,
kernel_size=kernel_size, stride=stride, padding=padding)
elif filter_type == 'linear':
self.filter = nn.Linear(in_channels, 2 * out_channels)
else:
raise ValueError(
f"'filter_type' should be 'conv2d' or 'linear', but got {filter_type}"
)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor.
Returns:
Tensor: Forward results.
"""
x = self.filter(x)
out = torch.chunk(x, chunks=2, dim=1)
return torch.max(out[0], out[1])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
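# Added usage sketch (illustrative, not part of the original file): the filter
# doubles the channel count, the result is split into two halves along dim=1,
# and the element-wise maximum of the halves is kept (a max-feature-map style
# unit), so the output channel count equals out_channels again.
def _max_feature_demo():
    layer = MaxFeature(in_channels=4, out_channels=4)
    x = torch.rand(4, 4, 4, 4)
    y = layer(x)
    assert y.shape == (4, 4, 4, 4)
    return y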
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_eq_gt_lt_maximum_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, out_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 64
x3 = xindex % 64
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3 + 128 * x2), xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + x3 + 128 * x2), xmask)
tmp4 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = triton_helpers.maximum(tmp2, tmp5)
tmp7 = tmp2 == tmp5
tmp8 = tmp2 > tmp5
tmp9 = tmp2 < tmp5
tl.store(out_ptr0 + x4, tmp6, xmask)
tl.store(out_ptr1 + x4, tmp7, xmask)
tl.store(out_ptr2 + x4, tmp8, xmask)
tl.store(out_ptr3 + x4, tmp9, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 8, 4, 4), (128, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_gt_lt_maximum_0[grid(256)](buf0, primals_2,
buf1, buf2, buf3, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
return buf1, primals_1, primals_3, buf2, buf3, buf4
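# Added commentary: the convolution is dispatched to the extern kernel, and the
# fused Triton kernel then adds the bias to each of the two 4-channel halves of
# the 8-channel output and stores their element-wise maximum, together with the
# eq/gt/lt masks that the backward pass of torch.max needs to route gradients.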
class MaxFeatureNew(nn.Module):
"""Conv2d or Linear layer with max feature selector
Generate feature maps with double channels, split them and select the max
feature.
Args:
in_channels (int): Channel number of inputs.
out_channels (int): Channel number of outputs.
kernel_size (int or tuple): Size of the convolving kernel.
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 1
filter_type (str): Type of filter. Options are 'conv2d' and 'linear'.
Default: 'conv2d'.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, filter_type='conv2d'):
super().__init__()
self.out_channels = out_channels
filter_type = filter_type.lower()
if filter_type == 'conv2d':
self.filter = nn.Conv2d(in_channels, 2 * out_channels,
kernel_size=kernel_size, stride=stride, padding=padding)
elif filter_type == 'linear':
self.filter = nn.Linear(in_channels, 2 * out_channels)
else:
raise ValueError(
f"'filter_type' should be 'conv2d' or 'linear', but got {filter_type}"
)
def forward(self, input_0):
primals_1 = self.filter.weight
primals_2 = self.filter.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| akimotty877/mmediting | MaxFeature | false | 3,070 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Conv2d or Linear layer with max feature selector
Generate feature maps with double channels, split them and select the max
feature.
Args:
in_channels (int): Channel number of inputs.
out_channels (int): Channel number of outputs.
kernel_size (int or tuple): Size of the convolving kernel.
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 1
filter_type (str): Type of filter. Options are 'conv2d' and 'linear'.
Default: 'conv2d'.
"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=1, filter_type='conv2d'):
super().__init__()
self.out_channels = out_channels
filter_type = filter_type.lower()
if filter_type == 'conv2d':
self.filter = nn.Conv2d(in_channels, 2 * out_channels,
kernel_size=kernel_size, stride=stride, padding=padding)
elif filter_type == 'linear':
self.filter = nn.Linear(in_channels, 2 * out_channels)
else:
raise ValueError(
f"'filter_type' should be 'conv2d' or 'linear', but got {filter_type}"
)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor.
Returns:
Tensor: Forward results.
"""
x = self.filter(x)
out = torch.chunk(x, chunks=2, dim=1)
return torch.max(out[0], out[1])
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [4, 4]
|
PlainRefiner | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_7/inductor_cache/7b/c7bwvkzrfyqe7on7r6rupptsqxo3x6vxvpuiow36csr3chlibccz.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_7/inductor_cache/st/csta3znaz5s4as2tgqjkidnl23bqeegkhtqw4xrobqdt7oqbloam.py
# Topologically Sorted Source Nodes: [raw_refine, add, pred_refine], Original ATen: [aten.convolution, aten.add, aten.sigmoid]
# Source node to ATen node mapping:
# add => add
# pred_refine => sigmoid
# raw_refine => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_10, %convolution_3), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add,), kwargs = {})
triton_poi_fused_add_convolution_sigmoid_1 = async_compile.triton('triton_poi_fused_add_convolution_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = tl.sigmoid(tmp5)
tl.store(out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (1, ), (1, ))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, out_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf3, primals_5, 4096, grid=grid(4096), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, out_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_0.run(buf5, primals_7, 4096, grid=grid(4096), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [raw_refine], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [raw_refine, add, pred_refine], Original ATen: [aten.convolution, aten.add, aten.sigmoid]
triton_poi_fused_add_convolution_sigmoid_1.run(primals_10, buf6, primals_9, buf7, 256, grid=grid(256), stream=stream0)
del buf6
del primals_10
del primals_9
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8, buf1, buf3, buf5, buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class PlainRefiner(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
            convolutional layers.
loss_refine (dict): Config of the loss of the refiner. Default: None.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super().__init__()
assert pretrained is None, 'pretrained not supported yet'
self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3,
padding=1)
self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1
)
self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        # NOTE: xavier_init is not defined in this snippet; it is assumed to be
        # mmcv.cnn.xavier_init and is imported lazily so the class loads without mmcv.
        from mmcv.cnn import xavier_init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m)
def forward(self, x, raw_alpha):
"""Forward function.
Args:
x (Tensor): The input feature map of refiner.
raw_alpha (Tensor): The raw predicted alpha matte.
Returns:
Tensor: The refined alpha matte.
"""
out = self.relu(self.refine_conv1(x))
out = self.relu(self.refine_conv2(out))
out = self.relu(self.refine_conv3(out))
raw_refine = self.refine_pred(out)
pred_refine = torch.sigmoid(raw_alpha + raw_refine)
return pred_refine
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
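# Fused bias-add + ReLU applied in place to a convolution output: each program
# processes XBLOCK contiguous elements of a (4, 64, 4, 4) activation, with the
# per-channel bias selected via x1 = xindex // 16 % 64.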
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
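# Fused refiner head: adds the refine_pred bias to the single-channel raw_refine
# map, broadcasts it over raw_alpha's channels, and applies sigmoid to produce
# the refined alpha prediction.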
@triton.jit
def triton_poi_fused_add_convolution_sigmoid_1(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 + tmp3
tmp5 = tmp0 + tmp4
tmp6 = tl.sigmoid(tmp5)
tl.store(out_ptr0 + x3, tmp6, xmask)
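# Compiled forward pass: three 3x3 convolutions (run through extern conv
# kernels), each followed by the fused bias+ReLU kernel above, then the
# 1-channel prediction conv and the fused add+sigmoid kernel that merges the
# result with raw_alpha (primals_10).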
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (64,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (1, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_9, (1,), (1,))
assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(4096)](buf1, primals_2,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_0[grid(4096)](buf3, primals_5,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 64, 4, 4), (1024, 16, 4, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_relu_0[grid(4096)](buf5, primals_7,
4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 1, 4, 4), (16, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_sigmoid_1[grid(256)](primals_10,
buf6, primals_9, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf6
del primals_10
del primals_9
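    # buf7 holds the refined alpha prediction; the remaining tensors are
    # returned so they can be reused (e.g. saved for a backward pass).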
return (buf7, primals_1, primals_3, primals_4, primals_6, primals_8,
buf1, buf3, buf5, buf7)
class PlainRefinerNew(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
            convolutional layers.
loss_refine (dict): Config of the loss of the refiner. Default: None.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super().__init__()
assert pretrained is None, 'pretrained not supported yet'
self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3,
padding=1)
self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1
)
self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        # NOTE: xavier_init is not defined in this snippet; it is assumed to be
        # mmcv.cnn.xavier_init and is imported lazily so the class loads without mmcv.
        from mmcv.cnn import xavier_init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m)
def forward(self, input_0, input_1):
primals_1 = self.refine_conv1.weight
primals_2 = self.refine_conv1.bias
primals_4 = self.refine_conv2.weight
primals_5 = self.refine_conv2.bias
primals_6 = self.refine_conv3.weight
primals_7 = self.refine_conv3.bias
primals_8 = self.refine_pred.weight
primals_9 = self.refine_pred.bias
primals_3 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
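# Usage sketch (assumes a CUDA device; the compiled call() also asserts the
# exact (4, 4, 4, 4) input shapes this module was compiled for):
#
#     refiner = PlainRefinerNew().cuda()
#     x = torch.rand(4, 4, 4, 4, device='cuda')          # refiner feature input
#     raw_alpha = torch.rand(4, 4, 4, 4, device='cuda')  # raw predicted alpha
#     pred_refine = refiner(x, raw_alpha)                 # refined alpha in (0, 1)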
| akimotty877/mmediting | PlainRefiner | false | 3,071 | [
"Apache-2.0"
] | 0 | cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | https://github.com/akimotty877/mmediting/tree/cae872d6f3e867ba144c7c0dbc29a0ee1a29e5a6 | import torch
import torch.nn as nn
class Model(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
            convolutional layers.
loss_refine (dict): Config of the loss of the refiner. Default: None.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super().__init__()
assert pretrained is None, 'pretrained not supported yet'
self.refine_conv1 = nn.Conv2d(4, conv_channels, kernel_size=3,
padding=1)
self.refine_conv2 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(conv_channels, conv_channels,
kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(conv_channels, 1, kernel_size=3, padding=1
)
self.relu = nn.ReLU(inplace=True)
    def init_weights(self):
        # NOTE: xavier_init is not defined in this snippet; it is assumed to be
        # mmcv.cnn.xavier_init and is imported lazily so the class loads without mmcv.
        from mmcv.cnn import xavier_init
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m)
def forward(self, x, raw_alpha):
"""Forward function.
Args:
x (Tensor): The input feature map of refiner.
raw_alpha (Tensor): The raw predicted alpha matte.
Returns:
Tensor: The refined alpha matte.
"""
out = self.relu(self.refine_conv1(x))
out = self.relu(self.refine_conv2(out))
out = self.relu(self.refine_conv3(out))
raw_refine = self.refine_pred(out)
pred_refine = torch.sigmoid(raw_alpha + raw_refine)
return pred_refine
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return []
|